diff --git a/.buildkite/hooks/pre-command b/.buildkite/hooks/pre-command index 3a8f3e09e847e..42af0d79b34da 100644 --- a/.buildkite/hooks/pre-command +++ b/.buildkite/hooks/pre-command @@ -58,3 +58,19 @@ if [[ "${USE_LUCENE_SNAPSHOT_CREDS:-}" == "true" ]]; then unset data fi + +if [[ "${USE_DRA_CREDENTIALS:-}" == "true" ]]; then + DRA_VAULT_ROLE_ID_SECRET=$(vault read -field=role-id secret/ci/elastic-elasticsearch/legacy-vault-credentials) + export DRA_VAULT_ROLE_ID_SECRET + + DRA_VAULT_SECRET_ID_SECRET=$(vault read -field=secret-id secret/ci/elastic-elasticsearch/legacy-vault-credentials) + export DRA_VAULT_SECRET_ID_SECRET + + DRA_VAULT_ADDR=https://secrets.elastic.co:8200 + export DRA_VAULT_ADDR +fi + +if [[ "${USE_SNYK_CREDENTIALS:-}" == "true" ]]; then + SNYK_TOKEN=$(vault read -field=token secret/ci/elastic-elasticsearch/migrated/snyk) + export SNYK_TOKEN +fi diff --git a/.buildkite/pipelines/dra-workflow.yml b/.buildkite/pipelines/dra-workflow.yml new file mode 100644 index 0000000000000..336bb74041be3 --- /dev/null +++ b/.buildkite/pipelines/dra-workflow.yml @@ -0,0 +1,9 @@ +steps: + - command: .buildkite/scripts/dra-workflow.sh + env: + USE_DRA_CREDENTIALS: "true" + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2204 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk diff --git a/.buildkite/pipelines/ecs-dynamic-template-tests.yml b/.buildkite/pipelines/ecs-dynamic-template-tests.yml new file mode 100644 index 0000000000000..b1fe972b724f1 --- /dev/null +++ b/.buildkite/pipelines/ecs-dynamic-template-tests.yml @@ -0,0 +1,9 @@ +steps: + - label: ecs-dynamic-templates-tests + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.configure_test_clusters_with_one_processor=true :x-pack:plugin:stack:javaRestTest + timeout_in_minutes: 420 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + diskSizeGb: 350 + machineType: custom-32-98304 diff --git a/.buildkite/pipelines/intake.template.yml b/.buildkite/pipelines/intake.template.yml new file mode 100644 index 0000000000000..8a9c153da4e0d --- /dev/null +++ b/.buildkite/pipelines/intake.template.yml @@ -0,0 +1,66 @@ +steps: + - label: sanity-check + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files precommit + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + - wait + - label: part1 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart1 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + - label: part2 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart2 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + - label: part3 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart3 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + - group: 
bwc-snapshots + steps: + - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files v$$BWC_VERSION#bwcTest + timeout_in_minutes: 300 + matrix: + setup: + BWC_VERSION: $BWC_LIST + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: "{{matrix.BWC_VERSION}}" + - label: rest-compat + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkRestCompat + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + - wait + - trigger: elasticsearch-dra-workflow + label: Trigger DRA snapshot workflow + async: true + build: + branch: "$BUILDKITE_BRANCH" + commit: "$BUILDKITE_COMMIT" + env: + DRA_WORKFLOW: snapshot diff --git a/.buildkite/pipelines/intake.yml b/.buildkite/pipelines/intake.yml new file mode 100644 index 0000000000000..bdff41788b563 --- /dev/null +++ b/.buildkite/pipelines/intake.yml @@ -0,0 +1,67 @@ +# This file is auto-generated. See .buildkite/pipelines/intake.template.yml +steps: + - label: sanity-check + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files precommit + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + - wait + - label: part1 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart1 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + - label: part2 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart2 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + - label: part3 + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkPart3 + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + - group: bwc-snapshots + steps: + - label: "{{matrix.BWC_VERSION}} / bwc-snapshots" + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files v$$BWC_VERSION#bwcTest + timeout_in_minutes: 300 + matrix: + setup: + BWC_VERSION: ["7.17.14", "8.9.3", "8.10.0", "8.11.0"] + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: "{{matrix.BWC_VERSION}}" + - label: rest-compat + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dorg.elasticsearch.build.cache.push=true -Dignore.tests.seed -Dscan.capture-task-input-files checkRestCompat + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + 
machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + - wait + - trigger: elasticsearch-dra-workflow + label: Trigger DRA snapshot workflow + async: true + build: + branch: "$BUILDKITE_BRANCH" + commit: "$BUILDKITE_COMMIT" + env: + DRA_WORKFLOW: snapshot diff --git a/.buildkite/pipelines/periodic-packaging.bwc.template.yml b/.buildkite/pipelines/periodic-packaging.bwc.template.yml new file mode 100644 index 0000000000000..0ec7721381d07 --- /dev/null +++ b/.buildkite/pipelines/periodic-packaging.bwc.template.yml @@ -0,0 +1,15 @@ + - label: "{{matrix.image}} / $BWC_VERSION / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v$BWC_VERSION + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: $BWC_VERSION diff --git a/.buildkite/pipelines/periodic-packaging.template.yml b/.buildkite/pipelines/periodic-packaging.template.yml new file mode 100644 index 0000000000000..1f1852639e997 --- /dev/null +++ b/.buildkite/pipelines/periodic-packaging.template.yml @@ -0,0 +1,52 @@ +steps: + - group: packaging-tests-unix + steps: + - label: "{{matrix.image}} / packaging-tests-unix" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ destructivePackagingTest + timeout_in_minutes: 300 + matrix: + setup: + image: + - centos-7 + - debian-10 + - debian-11 + - opensuse-leap-15 + - oraclelinux-7 + - oraclelinux-8 + - sles-12 + - sles-15 + - ubuntu-1804 + - ubuntu-2004 + - ubuntu-2204 + - rocky-8 + - rhel-7 + - rhel-8 + - rhel-9 + - almalinux-8 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + diskSizeGb: 350 + machineType: custom-16-32768 + env: {} + - group: packaging-tests-upgrade + steps: $BWC_STEPS + - group: packaging-tests-windows + steps: + - label: "{{matrix.image}} / packaging-tests-windows" + command: | + .\.buildkite\scripts\run-script.ps1 .\.ci\scripts\packaging-test.ps1 + timeout_in_minutes: 180 + matrix: + setup: + image: + - windows-2016 + - windows-2019 + - windows-2022 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-32-98304 + diskType: pd-ssd + diskSizeGb: 350 + env: {} diff --git a/.buildkite/pipelines/periodic-packaging.yml b/.buildkite/pipelines/periodic-packaging.yml new file mode 100644 index 0000000000000..3b70b82746ed8 --- /dev/null +++ b/.buildkite/pipelines/periodic-packaging.yml @@ -0,0 +1,1653 @@ +# This file is auto-generated. 
See .buildkite/pipelines/periodic-packaging.template.yml +steps: + - group: packaging-tests-unix + steps: + - label: "{{matrix.image}} / packaging-tests-unix" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ destructivePackagingTest + timeout_in_minutes: 300 + matrix: + setup: + image: + - centos-7 + - debian-10 + - debian-11 + - opensuse-leap-15 + - oraclelinux-7 + - oraclelinux-8 + - sles-12 + - sles-15 + - ubuntu-1804 + - ubuntu-2004 + - ubuntu-2204 + - rocky-8 + - rhel-7 + - rhel-8 + - rhel-9 + - almalinux-8 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + diskSizeGb: 350 + machineType: custom-16-32768 + env: {} + - group: packaging-tests-upgrade + steps: + - label: "{{matrix.image}} / 7.0.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.0.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.0.0 + + - label: "{{matrix.image}} / 7.0.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.0.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.0.1 + + - label: "{{matrix.image}} / 7.1.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.1.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.1.0 + + - label: "{{matrix.image}} / 7.1.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.1.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.1.1 + + - label: "{{matrix.image}} / 7.2.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.2.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.2.0 + + - label: "{{matrix.image}} / 7.2.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true 
destructiveDistroUpgradeTest.v7.2.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.2.1 + + - label: "{{matrix.image}} / 7.3.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.3.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.3.0 + + - label: "{{matrix.image}} / 7.3.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.3.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.3.1 + + - label: "{{matrix.image}} / 7.3.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.3.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.3.2 + + - label: "{{matrix.image}} / 7.4.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.4.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.4.0 + + - label: "{{matrix.image}} / 7.4.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.4.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.4.1 + + - label: "{{matrix.image}} / 7.4.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.4.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.4.2 + + - label: "{{matrix.image}} / 7.5.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ 
-Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.5.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.5.0 + + - label: "{{matrix.image}} / 7.5.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.5.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.5.1 + + - label: "{{matrix.image}} / 7.5.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.5.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.5.2 + + - label: "{{matrix.image}} / 7.6.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.6.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.6.0 + + - label: "{{matrix.image}} / 7.6.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.6.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.6.1 + + - label: "{{matrix.image}} / 7.6.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.6.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.6.2 + + - label: "{{matrix.image}} / 7.7.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.7.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.7.0 + + - label: "{{matrix.image}} / 7.7.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache 
-Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.7.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.7.1 + + - label: "{{matrix.image}} / 7.8.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.8.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.8.0 + + - label: "{{matrix.image}} / 7.8.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.8.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.8.1 + + - label: "{{matrix.image}} / 7.9.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.9.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.9.0 + + - label: "{{matrix.image}} / 7.9.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.9.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.9.1 + + - label: "{{matrix.image}} / 7.9.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.9.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.9.2 + + - label: "{{matrix.image}} / 7.9.3 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.9.3 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.9.3 + + - label: "{{matrix.image}} / 7.10.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh 
--build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.10.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.10.0 + + - label: "{{matrix.image}} / 7.10.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.10.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.10.1 + + - label: "{{matrix.image}} / 7.10.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.10.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.10.2 + + - label: "{{matrix.image}} / 7.11.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.11.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.11.0 + + - label: "{{matrix.image}} / 7.11.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.11.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.11.1 + + - label: "{{matrix.image}} / 7.11.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.11.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.11.2 + + - label: "{{matrix.image}} / 7.12.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.12.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.12.0 + + - label: "{{matrix.image}} / 7.12.1 / packaging-tests-upgrade" + command: 
./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.12.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.12.1 + + - label: "{{matrix.image}} / 7.13.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.13.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.13.0 + + - label: "{{matrix.image}} / 7.13.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.13.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.13.1 + + - label: "{{matrix.image}} / 7.13.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.13.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.13.2 + + - label: "{{matrix.image}} / 7.13.3 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.13.3 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.13.3 + + - label: "{{matrix.image}} / 7.13.4 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.13.4 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.13.4 + + - label: "{{matrix.image}} / 7.14.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.14.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.14.0 + + - label: "{{matrix.image}} / 7.14.1 / 
packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.14.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.14.1 + + - label: "{{matrix.image}} / 7.14.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.14.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.14.2 + + - label: "{{matrix.image}} / 7.15.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.15.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.15.0 + + - label: "{{matrix.image}} / 7.15.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.15.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.15.1 + + - label: "{{matrix.image}} / 7.15.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.15.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.15.2 + + - label: "{{matrix.image}} / 7.16.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.16.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.16.0 + + - label: "{{matrix.image}} / 7.16.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.16.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.16.1 + + - label: 
"{{matrix.image}} / 7.16.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.16.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.16.2 + + - label: "{{matrix.image}} / 7.16.3 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.16.3 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.16.3 + + - label: "{{matrix.image}} / 7.17.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.0 + + - label: "{{matrix.image}} / 7.17.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.1 + + - label: "{{matrix.image}} / 7.17.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.2 + + - label: "{{matrix.image}} / 7.17.3 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.3 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.3 + + - label: "{{matrix.image}} / 7.17.4 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.4 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + 
BWC_VERSION: 7.17.4 + + - label: "{{matrix.image}} / 7.17.5 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.5 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.5 + + - label: "{{matrix.image}} / 7.17.6 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.6 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.6 + + - label: "{{matrix.image}} / 7.17.7 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.7 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.7 + + - label: "{{matrix.image}} / 7.17.8 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.8 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.8 + + - label: "{{matrix.image}} / 7.17.9 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.9 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.9 + + - label: "{{matrix.image}} / 7.17.10 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.10 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.10 + + - label: "{{matrix.image}} / 7.17.11 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.11 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + 
buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.11 + + - label: "{{matrix.image}} / 7.17.12 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.12 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.12 + + - label: "{{matrix.image}} / 7.17.13 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.13 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.13 + + - label: "{{matrix.image}} / 7.17.14 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v7.17.14 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.14 + + - label: "{{matrix.image}} / 8.0.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.0.0 + + - label: "{{matrix.image}} / 8.0.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.0.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.0.1 + + - label: "{{matrix.image}} / 8.1.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.1.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.1.0 + + - label: "{{matrix.image}} / 8.1.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.1.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + 
machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.1.1 + + - label: "{{matrix.image}} / 8.1.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.1.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.1.2 + + - label: "{{matrix.image}} / 8.1.3 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.1.3 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.1.3 + + - label: "{{matrix.image}} / 8.2.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.2.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.2.0 + + - label: "{{matrix.image}} / 8.2.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.2.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.2.1 + + - label: "{{matrix.image}} / 8.2.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.2.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.2.2 + + - label: "{{matrix.image}} / 8.2.3 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.2.3 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.2.3 + + - label: "{{matrix.image}} / 8.3.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.3.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: 
family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.3.0 + + - label: "{{matrix.image}} / 8.3.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.3.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.3.1 + + - label: "{{matrix.image}} / 8.3.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.3.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.3.2 + + - label: "{{matrix.image}} / 8.3.3 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.3.3 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.3.3 + + - label: "{{matrix.image}} / 8.4.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.4.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.4.0 + + - label: "{{matrix.image}} / 8.4.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.4.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.4.1 + + - label: "{{matrix.image}} / 8.4.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.4.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.4.2 + + - label: "{{matrix.image}} / 8.4.3 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.4.3 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp 
+ image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.4.3 + + - label: "{{matrix.image}} / 8.5.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.5.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.5.0 + + - label: "{{matrix.image}} / 8.5.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.5.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.5.1 + + - label: "{{matrix.image}} / 8.5.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.5.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.5.2 + + - label: "{{matrix.image}} / 8.5.3 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.5.3 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.5.3 + + - label: "{{matrix.image}} / 8.6.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.6.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.6.0 + + - label: "{{matrix.image}} / 8.6.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.6.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.6.1 + + - label: "{{matrix.image}} / 8.6.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.6.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + 
provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.6.2 + + - label: "{{matrix.image}} / 8.7.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.7.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.7.0 + + - label: "{{matrix.image}} / 8.7.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.7.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.7.1 + + - label: "{{matrix.image}} / 8.8.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.8.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.8.0 + + - label: "{{matrix.image}} / 8.8.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.8.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.8.1 + + - label: "{{matrix.image}} / 8.8.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.8.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.8.2 + + - label: "{{matrix.image}} / 8.9.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.9.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.9.0 + + - label: "{{matrix.image}} / 8.9.1 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.9.1 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + 
agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.9.1 + + - label: "{{matrix.image}} / 8.9.2 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.9.2 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.9.2 + + - label: "{{matrix.image}} / 8.9.3 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.9.3 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.9.3 + + - label: "{{matrix.image}} / 8.10.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.10.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.10.0 + + - label: "{{matrix.image}} / 8.11.0 / packaging-tests-upgrade" + command: ./.ci/scripts/packaging-test.sh --build-cache -Dorg.elasticsearch.build.cache.url=https://gradle-enterprise.elastic.co/cache/ -Dbwc.checkout.align=true destructiveDistroUpgradeTest.v8.11.0 + timeout_in_minutes: 300 + matrix: + setup: + image: + - rocky-8 + - ubuntu-2004 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-16-32768 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.11.0 + + - group: packaging-tests-windows + steps: + - label: "{{matrix.image}} / packaging-tests-windows" + command: | + .\.buildkite\scripts\run-script.ps1 .\.ci\scripts\packaging-test.ps1 + timeout_in_minutes: 180 + matrix: + setup: + image: + - windows-2016 + - windows-2019 + - windows-2022 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-32-98304 + diskType: pd-ssd + diskSizeGb: 350 + env: {} diff --git a/.buildkite/pipelines/periodic-platform-support.yml b/.buildkite/pipelines/periodic-platform-support.yml new file mode 100644 index 0000000000000..520089286ec36 --- /dev/null +++ b/.buildkite/pipelines/periodic-platform-support.yml @@ -0,0 +1,82 @@ +steps: + - group: platform-support-unix + steps: + - label: "{{matrix.image}} / platform-support-unix" + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true check + timeout_in_minutes: 420 + matrix: + setup: + image: + - centos-7 + - debian-10 + - debian-11 + - opensuse-leap-15 + - oraclelinux-7 + - oraclelinux-8 + - sles-12 + - sles-15 + - ubuntu-1804 + - ubuntu-2004 + - ubuntu-2204 + - rocky-8 + - rhel-7 + - rhel-8 + - rhel-9 + - almalinux-8 + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + diskSizeGb: 350 + machineType: custom-32-98304 + env: {} + - group: platform-support-windows + steps: + - label: 
"{{matrix.image}} / {{matrix.GRADLE_TASK}} / platform-support-windows" + command: | + .\.buildkite\scripts\run-script.ps1 bash .buildkite/scripts/windows-run-gradle.sh + timeout_in_minutes: 420 + matrix: + setup: + image: + - windows-2016 + - windows-2019 + - windows-2022 + GRADLE_TASK: + - checkPart1 + - checkPart2 + - checkPart3 + - bwcTestSnapshots + - checkRestCompat + agents: + provider: gcp + image: family/elasticsearch-{{matrix.image}} + machineType: custom-32-98304 + diskType: pd-ssd + diskSizeGb: 350 + env: + GRADLE_TASK: "{{matrix.GRADLE_TASK}}" + - group: platform-support-arm + steps: + - label: "{{matrix.image}} / {{matrix.GRADLE_TASK}} / platform-support-arm" + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true {{matrix.GRADLE_TASK}} + timeout_in_minutes: 420 + matrix: + setup: + image: + - almalinux-8-aarch64 + - ubuntu-2004-aarch64 + GRADLE_TASK: + - checkPart1 + - checkPart2 + - checkPart3 + - bwcTestSnapshots + - checkRestCompat + agents: + provider: aws + imagePrefix: elasticsearch-{{matrix.image}} + instanceType: m6g.8xlarge + diskSizeGb: 350 + diskType: gp3 + diskName: /dev/sda1 + env: + GRADLE_TASK: "{{matrix.GRADLE_TASK}}" diff --git a/.buildkite/pipelines/periodic.bwc.template.yml b/.buildkite/pipelines/periodic.bwc.template.yml new file mode 100644 index 0000000000000..8a8c43d75e3ef --- /dev/null +++ b/.buildkite/pipelines/periodic.bwc.template.yml @@ -0,0 +1,10 @@ + - label: $BWC_VERSION / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v$BWC_VERSION#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: $BWC_VERSION \ No newline at end of file diff --git a/.buildkite/pipelines/periodic.template.yml b/.buildkite/pipelines/periodic.template.yml new file mode 100644 index 0000000000000..e4f844afc3f41 --- /dev/null +++ b/.buildkite/pipelines/periodic.template.yml @@ -0,0 +1,115 @@ +steps: + - group: bwc + steps: $BWC_STEPS + - label: concurrent-search-tests + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.jvm.argline=-Des.concurrent_search=true -Des.concurrent_search=true check + timeout_in_minutes: 420 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + diskSizeGb: 350 + machineType: custom-32-98304 + - label: encryption-at-rest + command: .buildkite/scripts/encryption-at-rest.sh + timeout_in_minutes: 420 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + diskSizeGb: 350 + machineType: custom-32-98304 + - label: eql-correctness + command: .buildkite/scripts/eql-correctness.sh + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + - label: example-plugins + command: |- + cd $$WORKSPACE/plugins/examples + + $$WORKSPACE/.ci/scripts/run-gradle.sh build + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + - group: java-fips-matrix + steps: + - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.GRADLE_TASK}} / java-fips-matrix" + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.fips.enabled=true $$GRADLE_TASK + timeout_in_minutes: 300 + matrix: + setup: + ES_RUNTIME_JAVA: + - openjdk17 + GRADLE_TASK: + - checkPart1 + - checkPart2 + - checkPart3 + - bwcTestSnapshots + - checkRestCompat + agents: + provider: gcp + image: 
family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" + GRADLE_TASK: "{{matrix.GRADLE_TASK}}" + - group: java-matrix + steps: + - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.GRADLE_TASK}} / java-matrix" + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true $$GRADLE_TASK + timeout_in_minutes: 300 + matrix: + setup: + ES_RUNTIME_JAVA: + - graalvm-ce17 + - openjdk17 + - openjdk18 + - openjdk19 + - openjdk20 + - openjdk21 + GRADLE_TASK: + - checkPart1 + - checkPart2 + - checkPart3 + - bwcTestSnapshots + - checkRestCompat + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" + GRADLE_TASK: "{{matrix.GRADLE_TASK}}" + - label: release-tests + command: .buildkite/scripts/release-tests.sh + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + diskSizeGb: 350 + machineType: custom-32-98304 + - label: single-processor-node-tests + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.configure_test_clusters_with_one_processor=true check + timeout_in_minutes: 420 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + diskSizeGb: 350 + machineType: custom-32-98304 + - label: Upload Snyk Dependency Graph + command: .ci/scripts/run-gradle.sh uploadSnykDependencyGraph -PsnykTargetReference=$BUILDKITE_BRANCH + env: + USE_SNYK_CREDENTIALS: "true" + timeout_in_minutes: 20 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n2-standard-8 + buildDirectory: /dev/shm/bk diff --git a/.buildkite/pipelines/periodic.trigger.yml b/.buildkite/pipelines/periodic.trigger.yml deleted file mode 100644 index 5d5d592448b5d..0000000000000 --- a/.buildkite/pipelines/periodic.trigger.yml +++ /dev/null @@ -1,16 +0,0 @@ -steps: - - trigger: elasticsearch-periodic - label: Trigger periodic pipeline for main - async: true - build: - branch: main - - trigger: elasticsearch-periodic - label: Trigger periodic pipeline for 8.9 - async: true - build: - branch: "8.9" - - trigger: elasticsearch-periodic - label: Trigger periodic pipeline for 7.17 - async: true - build: - branch: "7.17" diff --git a/.buildkite/pipelines/periodic.yml b/.buildkite/pipelines/periodic.yml index 95dba6e1d44f3..29aec69bf3832 100644 --- a/.buildkite/pipelines/periodic.yml +++ b/.buildkite/pipelines/periodic.yml @@ -1,9 +1,1047 @@ +# This file is auto-generated. 
See .buildkite/pipelines/periodic.template.yml steps: + - group: bwc + steps: + - label: 7.0.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.0.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.0.0 + - label: 7.0.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.0.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.0.1 + - label: 7.1.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.1.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.1.0 + - label: 7.1.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.1.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.1.1 + - label: 7.2.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.2.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.2.0 + - label: 7.2.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.2.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.2.1 + - label: 7.3.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.3.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.3.0 + - label: 7.3.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.3.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.3.1 + - label: 7.3.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.3.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.3.2 + - label: 7.4.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.4.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.4.0 + - label: 7.4.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.4.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.4.1 + - label: 7.4.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.4.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.4.2 + - label: 7.5.0 / bwc + command: .ci/scripts/run-gradle.sh 
-Dbwc.checkout.align=true v7.5.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.5.0 + - label: 7.5.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.5.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.5.1 + - label: 7.5.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.5.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.5.2 + - label: 7.6.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.6.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.6.0 + - label: 7.6.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.6.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.6.1 + - label: 7.6.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.6.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.6.2 + - label: 7.7.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.7.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.7.0 + - label: 7.7.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.7.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.7.1 + - label: 7.8.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.8.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.8.0 + - label: 7.8.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.8.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.8.1 + - label: 7.9.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.9.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.9.0 + - label: 7.9.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.9.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.9.1 + - label: 7.9.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.9.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + 
machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.9.2 + - label: 7.9.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.9.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.9.3 + - label: 7.10.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.10.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.10.0 + - label: 7.10.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.10.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.10.1 + - label: 7.10.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.10.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.10.2 + - label: 7.11.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.11.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.11.0 + - label: 7.11.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.11.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.11.1 + - label: 7.11.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.11.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.11.2 + - label: 7.12.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.12.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.12.0 + - label: 7.12.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.12.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.12.1 + - label: 7.13.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.13.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.13.0 + - label: 7.13.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.13.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.13.1 + - label: 7.13.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.13.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.13.2 + - label: 7.13.3 / bwc + 
command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.13.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.13.3 + - label: 7.13.4 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.13.4#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.13.4 + - label: 7.14.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.14.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.14.0 + - label: 7.14.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.14.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.14.1 + - label: 7.14.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.14.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.14.2 + - label: 7.15.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.15.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.15.0 + - label: 7.15.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.15.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.15.1 + - label: 7.15.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.15.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.15.2 + - label: 7.16.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.16.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.16.0 + - label: 7.16.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.16.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.16.1 + - label: 7.16.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.16.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.16.2 + - label: 7.16.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.16.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.16.3 + - label: 7.17.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.0#bwcTest + timeout_in_minutes: 300 + 
agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.0 + - label: 7.17.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.1 + - label: 7.17.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.2 + - label: 7.17.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.3 + - label: 7.17.4 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.4#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.4 + - label: 7.17.5 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.5#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.5 + - label: 7.17.6 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.6#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.6 + - label: 7.17.7 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.7#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.7 + - label: 7.17.8 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.8#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.8 + - label: 7.17.9 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.9#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.9 + - label: 7.17.10 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.10#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.10 + - label: 7.17.11 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.11#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.11 + - label: 7.17.12 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.12#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + 
buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.12 + - label: 7.17.13 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.13#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.13 + - label: 7.17.14 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v7.17.14#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 7.17.14 + - label: 8.0.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.0.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.0.0 + - label: 8.0.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.0.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.0.1 + - label: 8.1.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.1.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.1.0 + - label: 8.1.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.1.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.1.1 + - label: 8.1.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.1.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.1.2 + - label: 8.1.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.1.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.1.3 + - label: 8.2.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.2.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.2.0 + - label: 8.2.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.2.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.2.1 + - label: 8.2.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.2.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.2.2 + - label: 8.2.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.2.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.2.3 + - label: 8.3.0 / bwc + command: .ci/scripts/run-gradle.sh 
-Dbwc.checkout.align=true v8.3.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.3.0 + - label: 8.3.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.3.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.3.1 + - label: 8.3.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.3.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.3.2 + - label: 8.3.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.3.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.3.3 + - label: 8.4.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.4.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.4.0 + - label: 8.4.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.4.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.4.1 + - label: 8.4.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.4.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.4.2 + - label: 8.4.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.4.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.4.3 + - label: 8.5.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.5.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.5.0 + - label: 8.5.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.5.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.5.1 + - label: 8.5.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.5.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.5.2 + - label: 8.5.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.5.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.5.3 + - label: 8.6.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.6.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + 
machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.6.0 + - label: 8.6.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.6.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.6.1 + - label: 8.6.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.6.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.6.2 + - label: 8.7.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.7.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.7.0 + - label: 8.7.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.7.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.7.1 + - label: 8.8.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.8.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.8.0 + - label: 8.8.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.8.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.8.1 + - label: 8.8.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.8.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.8.2 + - label: 8.9.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.9.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.9.0 + - label: 8.9.1 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.9.1#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.9.1 + - label: 8.9.2 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.9.2#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.9.2 + - label: 8.9.3 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.9.3#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.9.3 + - label: 8.10.0 / bwc + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.10.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.10.0 + - label: 8.11.0 / bwc + command: 
.ci/scripts/run-gradle.sh -Dbwc.checkout.align=true v8.11.0#bwcTest + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + env: + BWC_VERSION: 8.11.0 + - label: concurrent-search-tests + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.jvm.argline=-Des.concurrent_search=true -Des.concurrent_search=true check + timeout_in_minutes: 420 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + diskSizeGb: 350 + machineType: custom-32-98304 + - label: encryption-at-rest + command: .buildkite/scripts/encryption-at-rest.sh + timeout_in_minutes: 420 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + diskSizeGb: 350 + machineType: custom-32-98304 + - label: eql-correctness + command: .buildkite/scripts/eql-correctness.sh + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk + - label: example-plugins + command: |- + cd $$WORKSPACE/plugins/examples + + $$WORKSPACE/.ci/scripts/run-gradle.sh build + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: custom-32-98304 + buildDirectory: /dev/shm/bk - group: java-fips-matrix steps: - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.GRADLE_TASK}} / java-fips-matrix" command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.fips.enabled=true $$GRADLE_TASK - timeout_in_minutes: 180 + timeout_in_minutes: 300 matrix: setup: ES_RUNTIME_JAVA: @@ -26,7 +1064,7 @@ steps: steps: - label: "{{matrix.ES_RUNTIME_JAVA}} / {{matrix.GRADLE_TASK}} / java-matrix" command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true $$GRADLE_TASK - timeout_in_minutes: 180 + timeout_in_minutes: 300 matrix: setup: ES_RUNTIME_JAVA: @@ -50,50 +1088,29 @@ steps: env: ES_RUNTIME_JAVA: "{{matrix.ES_RUNTIME_JAVA}}" GRADLE_TASK: "{{matrix.GRADLE_TASK}}" - - group: packaging-tests-windows - steps: - - label: "{{matrix.image}} / packaging-tests-windows" - command: | - .\.buildkite\scripts\run-script.ps1 .\.ci\scripts\packaging-test.ps1 - timeout_in_minutes: 180 - matrix: - setup: - image: - - windows-2016 - - windows-2019 - - windows-2022 - agents: - provider: gcp - image: family/brian-elasticsearch-{{matrix.image}} - imageProject: elastic-images-qa - machineType: custom-32-98304 - diskType: pd-ssd - diskSizeGb: 350 - env: {} - - group: platform-support-windows - steps: - - label: "{{matrix.image}} / {{matrix.GRADLE_TASK}} / platform-support-windows" - command: | - .\.buildkite\scripts\run-script.ps1 bash .buildkite/scripts/windows-run-gradle.sh - timeout_in_minutes: 360 - matrix: - setup: - image: - - windows-2016 - - windows-2019 - - windows-2022 - GRADLE_TASK: - - checkPart1 - - checkPart2 - - checkPart3 - - bwcTestSnapshots - - checkRestCompat - agents: - provider: gcp - image: family/brian-elasticsearch-{{matrix.image}} - imageProject: elastic-images-qa - machineType: custom-32-98304 - diskType: pd-ssd - diskSizeGb: 350 - env: - GRADLE_TASK: "{{matrix.GRADLE_TASK}}" + - label: release-tests + command: .buildkite/scripts/release-tests.sh + timeout_in_minutes: 300 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + diskSizeGb: 350 + machineType: custom-32-98304 + - label: single-processor-node-tests + command: .ci/scripts/run-gradle.sh -Dbwc.checkout.align=true -Dtests.configure_test_clusters_with_one_processor=true check + timeout_in_minutes: 420 + agents: + 
provider: gcp + image: family/elasticsearch-ubuntu-2004 + diskSizeGb: 350 + machineType: custom-32-98304 + - label: Upload Snyk Dependency Graph + command: .ci/scripts/run-gradle.sh uploadSnykDependencyGraph -PsnykTargetReference=$BUILDKITE_BRANCH + env: + USE_SNYK_CREDENTIALS: "true" + timeout_in_minutes: 20 + agents: + provider: gcp + image: family/elasticsearch-ubuntu-2004 + machineType: n2-standard-8 + buildDirectory: /dev/shm/bk diff --git a/.buildkite/scripts/branches.sh b/.buildkite/scripts/branches.sh new file mode 100755 index 0000000000000..886fa59e4d02c --- /dev/null +++ b/.buildkite/scripts/branches.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +# This determines which branches will have pipelines triggered periodically, for dra workflows. +BRANCHES=( $(cat branches.json | jq -r '.branches[].branch') ) diff --git a/.buildkite/scripts/dra-update-staging.sh b/.buildkite/scripts/dra-update-staging.sh new file mode 100755 index 0000000000000..676361bf1cfcf --- /dev/null +++ b/.buildkite/scripts/dra-update-staging.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +set -euo pipefail + +source .buildkite/scripts/branches.sh + +for BRANCH in "${BRANCHES[@]}"; do + # Don't publish main branch to staging + if [[ "$BRANCH" == "main" ]]; then + continue + fi + + echo "--- Checking $BRANCH" + + BEATS_MANIFEST=$(curl -sS "https://artifacts-staging.elastic.co/beats/latest/${BRANCH}.json" | jq -r '.manifest_url') + ML_MANIFEST=$(curl -sS "https://artifacts-staging.elastic.co/ml-cpp/latest/${BRANCH}.json" | jq -r '.manifest_url') + ES_MANIFEST=$(curl -sS "https://artifacts-staging.elastic.co/elasticsearch/latest/${BRANCH}.json" | jq -r '.manifest_url') + + ES_BEATS_DEPENDENCY=$(curl -sS "$ES_MANIFEST" | jq -r '.projects.elasticsearch.dependencies[] | select(.prefix == "beats") | .build_uri') + ES_ML_DEPENDENCY=$(curl -sS "$ES_MANIFEST" | jq -r '.projects.elasticsearch.dependencies[] | select(.prefix == "ml-cpp") | .build_uri') + + SHOULD_TRIGGER="" + + if [ "$BEATS_MANIFEST" = "$ES_BEATS_DEPENDENCY" ]; then + echo "ES has the latest beats" + else + echo "Need to trigger a build, $BEATS_MANIFEST available but ES has $ES_BEATS_DEPENDENCY" + SHOULD_TRIGGER=true + fi + + if [ "$ML_MANIFEST" = "$ES_ML_DEPENDENCY" ]; then + echo "ES has the latest ml-cpp" + else + echo "Need to trigger a build, $ML_MANIFEST available but ES has $ES_ML_DEPENDENCY" + SHOULD_TRIGGER=true + fi + + if [[ "$SHOULD_TRIGGER" == "true" ]]; then + echo "Triggering DRA staging workflow for $BRANCH" + cat << EOF | buildkite-agent pipeline upload +steps: + - trigger: elasticsearch-dra-workflow + label: Trigger DRA staging workflow for $BRANCH + async: true + build: + branch: "$BRANCH" + env: + DRA_WORKFLOW: staging +EOF + fi +done diff --git a/.buildkite/scripts/dra-workflow.sh b/.buildkite/scripts/dra-workflow.sh new file mode 100755 index 0000000000000..02c8d9bc722b4 --- /dev/null +++ b/.buildkite/scripts/dra-workflow.sh @@ -0,0 +1,92 @@ +#!/bin/bash + +set -euo pipefail + +WORKFLOW="${DRA_WORKFLOW:-snapshot}" +BRANCH="${BUILDKITE_BRANCH:-}" + +# Don't publish main branch to staging +if [[ "$BRANCH" == "main" && "$WORKFLOW" == "staging" ]]; then + exit 0 +fi + +echo --- Preparing + +# TODO move this to image +sudo apt-get update -y +sudo apt-get install -y libxml2-utils python3.10-venv + +RM_BRANCH="$BRANCH" +if [[ "$BRANCH" == "main" ]]; then + RM_BRANCH=master +fi + +ES_VERSION=$(grep elasticsearch build-tools-internal/version.properties | sed "s/elasticsearch *= *//g") + +VERSION_SUFFIX="" +if [[ "$WORKFLOW" == "snapshot" ]]; then + 
VERSION_SUFFIX="-SNAPSHOT" +fi + +BEATS_BUILD_ID="$(./.ci/scripts/resolve-dra-manifest.sh beats "$RM_BRANCH" "$ES_VERSION" "$WORKFLOW")" +ML_CPP_BUILD_ID="$(./.ci/scripts/resolve-dra-manifest.sh ml-cpp "$RM_BRANCH" "$ES_VERSION" "$WORKFLOW")" + +LICENSE_KEY_ARG="" +BUILD_SNAPSHOT_ARG="" + +if [[ "$WORKFLOW" == "staging" ]]; then + LICENSE_KEY=$(mktemp -d)/license.key + # Notice that only the public key is being read here, which isn't really secret + vault read -field pubkey secret/ci/elastic-elasticsearch/migrated/license | base64 --decode > "$LICENSE_KEY" + LICENSE_KEY_ARG="-Dlicense.key=$LICENSE_KEY" + + BUILD_SNAPSHOT_ARG="-Dbuild.snapshot=false" +fi + +echo --- Building release artifacts + +.ci/scripts/run-gradle.sh -Ddra.artifacts=true \ + -Ddra.artifacts.dependency.beats="${BEATS_BUILD_ID}" \ + -Ddra.artifacts.dependency.ml-cpp="${ML_CPP_BUILD_ID}" \ + -Ddra.workflow="$WORKFLOW" \ + -Dcsv="$WORKSPACE/build/distributions/dependencies-${ES_VERSION}${VERSION_SUFFIX}.csv" \ + $LICENSE_KEY_ARG \ + $BUILD_SNAPSHOT_ARG \ + buildReleaseArtifacts \ + exportCompressedDockerImages \ + :distribution:generateDependenciesReport + +PATH="$PATH:${JAVA_HOME}/bin" # Required by the following script +x-pack/plugin/sql/connectors/tableau/package.sh asm qualifier="$VERSION_SUFFIX" + +# we regenerate this file as part of the release manager invocation +rm "build/distributions/elasticsearch-jdbc-${ES_VERSION}${VERSION_SUFFIX}.taco.sha512" + +# Allow other users access to read the artifacts so they are readable in the +# container +find "$WORKSPACE" -type f -path "*/build/distributions/*" -exec chmod a+r {} \; + +# Allow other users write access to create checksum files +find "$WORKSPACE" -type d -path "*/build/distributions" -exec chmod a+w {} \; + +echo --- Running release-manager + +exit 0 + +# Artifacts should be generated +docker run --rm \ + --name release-manager \ + -e VAULT_ADDR="$DRA_VAULT_ADDR" \ + -e VAULT_ROLE_ID="$DRA_VAULT_ROLE_ID_SECRET" \ + -e VAULT_SECRET_ID="$DRA_VAULT_SECRET_ID_SECRET" \ + --mount type=bind,readonly=false,src="$PWD",target=/artifacts \ + docker.elastic.co/infra/release-manager:latest \ + cli collect \ + --project elasticsearch \ + --branch "$RM_BRANCH" \ + --commit "$BUILDKITE_COMMIT" \ + --workflow "$WORKFLOW" \ + --version "$ES_VERSION" \ + --artifact-set main \ + --dependency "beats:https://artifacts-${WORKFLOW}.elastic.co/beats/${BEATS_BUILD_ID}/manifest-${ES_VERSION}${VERSION_SUFFIX}.json" \ + --dependency "ml-cpp:https://artifacts-${WORKFLOW}.elastic.co/ml-cpp/${ML_CPP_BUILD_ID}/manifest-${ES_VERSION}${VERSION_SUFFIX}.json" diff --git a/.buildkite/scripts/dra-workflow.trigger.sh b/.buildkite/scripts/dra-workflow.trigger.sh new file mode 100755 index 0000000000000..5ef756c30bccc --- /dev/null +++ b/.buildkite/scripts/dra-workflow.trigger.sh @@ -0,0 +1,28 @@ +#!/bin/bash + +set -euo pipefail + +echo "steps:" + +source .buildkite/scripts/branches.sh + +for BRANCH in "${BRANCHES[@]}"; do + if [[ "$BRANCH" == "main" ]]; then + continue + fi + + INTAKE_PIPELINE_SLUG="elasticsearch-intake" + BUILD_JSON=$(curl -sH "Authorization: Bearer ${BUILDKITE_API_TOKEN}" "https://api.buildkite.com/v2/organizations/elastic/pipelines/${INTAKE_PIPELINE_SLUG}/builds?branch=${BRANCH}&state=passed&per_page=1" | jq '.[0] | {commit: .commit, url: .web_url}') + LAST_GOOD_COMMIT=$(echo "${BUILD_JSON}" | jq -r '.commit') + + cat < "${eql_test_credentials_file}" + +.ci/scripts/run-gradle.sh -Dignore.tests.seed :x-pack:plugin:eql:qa:correctness:check diff --git 
a/.buildkite/scripts/periodic.trigger.sh b/.buildkite/scripts/periodic.trigger.sh new file mode 100644 index 0000000000000..36d106e87ee9c --- /dev/null +++ b/.buildkite/scripts/periodic.trigger.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +set -euo pipefail + +exit 0 + +echo "steps:" + +source .buildkite/scripts/branches.sh + +for BRANCH in "${BRANCHES[@]}"; do + INTAKE_PIPELINE_SLUG="elasticsearch-intake" + BUILD_JSON=$(curl -sH "Authorization: Bearer ${BUILDKITE_API_TOKEN}" "https://api.buildkite.com/v2/organizations/elastic/pipelines/${INTAKE_PIPELINE_SLUG}/builds?branch=${BRANCH}&state=passed&per_page=1" | jq '.[0] | {commit: .commit, url: .web_url}') + LAST_GOOD_COMMIT=$(echo "${BUILD_JSON}" | jq -r '.commit') + + cat <test-mute' - 'test-full-bwc' diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+cloud-deploy.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+cloud-deploy.yml index 350f9bb31b646..eedbf7bba5789 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+cloud-deploy.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+cloud-deploy.yml @@ -23,6 +23,7 @@ - 6.8 excluded-regions: - ^docs/.* + - ^x-pack/docs/.* white-list-labels: - 'cloud-deploy' builders: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml index 3ec52a3f39663..77c499f455b22 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+docs-check.yml @@ -20,6 +20,7 @@ cancel-builds-on-update: true included-regions: - ^docs/.* + - ^x-pack/docs/.* black-list-labels: - '>test-mute' builders: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml index f02db4fb099a3..fcdbf2ea87084 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+eql-correctness.yml @@ -22,6 +22,7 @@ - 6.8 excluded-regions: - ^docs/.* + - ^x-pack/docs/.* black-list-labels: - '>test-mute' builders: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+full-bwc.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+full-bwc.yml index cf01de3e782c0..5276d39f956d3 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+full-bwc.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+full-bwc.yml @@ -22,6 +22,7 @@ cancel-builds-on-update: true excluded-regions: - ^docs/.* + - ^x-pack/docs/.* white-list-labels: - 'test-full-bwc' black-list-labels: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml index e1687e5a1cc3a..c283da8e32479 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix-sample.yml @@ -23,6 +23,7 @@ - 6.8 excluded-regions: - ^docs/.* + - ^x-pack/docs/.* black-list-labels: - '>test-mute' - ':Delivery/Packaging' diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml index b35b252114694..95a4c4273ebb7 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-unix.yml @@ -23,6 +23,7 @@ - 6.8 excluded-regions: - ^docs/.* + - ^x-pack/docs/.* white-list-labels: - ':Delivery/Packaging' black-list-labels: 
diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-nojdk.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-nojdk.yml index 9d7410ea054a2..ecd4a1a084755 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-nojdk.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-nojdk.yml @@ -27,6 +27,7 @@ - 7.16 excluded-regions: - ^docs/.* + - ^x-pack/docs/.* white-list-labels: - ':Delivery/Packaging' black-list-labels: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample-nojdk.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample-nojdk.yml index 30881d45d2312..091dcf9eb77a0 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample-nojdk.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample-nojdk.yml @@ -27,6 +27,7 @@ - 7.16 excluded-regions: - ^docs/.* + - ^x-pack/docs/.* black-list-labels: - '>test-mute' - ':Delivery/Packaging' diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml index 2a02b4fdf2cf5..a438335529a3c 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows-sample.yml @@ -26,6 +26,7 @@ - 7.16 excluded-regions: - ^docs/.* + - ^x-pack/docs/.* black-list-labels: - '>test-mute' - ':Delivery/Packaging' diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml index 0d5f4477be1d5..1dc0127284c47 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-tests-windows.yml @@ -27,6 +27,7 @@ - 6.8 excluded-regions: - ^docs/.* + - ^x-pack/docs/.* white-list-labels: - ':Delivery/Packaging' black-list-labels: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml index 74769eaaa5c0b..01770b7d94c6e 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+packaging-upgrade-tests.yml @@ -24,6 +24,7 @@ - 6.8 excluded-regions: - ^docs/.* + - ^x-pack/docs/.* white-list-labels: - ':Delivery/Packaging' black-list-labels: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml index e2d586ef468e3..793ade87a1fd9 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-fips.yml @@ -22,6 +22,7 @@ - 6.8 excluded-regions: - ^docs/.* + - ^x-pack/docs/.* white-list-labels: - 'Team:Security' black-list-labels: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml index ffebe228f04af..9a55d8dc6eeac 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-1-windows.yml @@ -23,6 +23,7 @@ - 6.8 excluded-regions: - ^docs/.* + - ^x-pack/docs/.* white-list-labels: - 'test-windows' black-list-labels: diff --git 
a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml index f900317156adc..0795172b916e2 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-fips.yml @@ -22,6 +22,7 @@ - 6.8 excluded-regions: - ^docs/.* + - ^x-pack/docs/.* white-list-labels: - 'Team:Security' black-list-labels: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml index a26353f7a8e86..de09d5044d466 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-2-windows.yml @@ -23,6 +23,7 @@ - 6.8 excluded-regions: - ^docs/.* + - ^x-pack/docs/.* white-list-labels: - 'test-windows' black-list-labels: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-fips.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-fips.yml index b6ce8dc20b771..3383e81ae61ed 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-fips.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-fips.yml @@ -23,6 +23,7 @@ - 7.17 excluded-regions: - ^docs/.* + - ^x-pack/docs/.* white-list-labels: - 'Team:Security' black-list-labels: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-windows.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-windows.yml index d5d25573059e7..65ed5c58335a9 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-windows.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3-windows.yml @@ -24,6 +24,7 @@ - 7.17 excluded-regions: - ^docs/.* + - ^x-pack/docs/.* white-list-labels: - 'test-windows' black-list-labels: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3.yml index edc8f994bf506..325b9ecb68fd9 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+part-3.yml @@ -20,6 +20,7 @@ cancel-builds-on-update: true excluded-regions: - ^docs/.* + - ^x-pack/docs/.* black-list-labels: - '>test-mute' black-list-target-branches: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml index 6aa2888abb0da..98ada6e2080e4 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+release-tests.yml @@ -22,6 +22,7 @@ cancel-builds-on-update: true excluded-regions: - ^docs/.* + - ^x-pack/docs/.* white-list-labels: - 'test-release' black-list-target-branches: diff --git a/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml b/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml index 5ab81d7ceb45d..417ce525880d6 100644 --- a/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml +++ b/.ci/jobs.t/elastic+elasticsearch+pull-request+rest-compatibility.yml @@ -25,6 +25,7 @@ - 6.8 excluded-regions: - ^docs/.* + - ^x-pack/docs/.* black-list-labels: - '>test-mute' builders: diff --git a/.ci/snapshotBwcVersions b/.ci/snapshotBwcVersions index 6ad91910ab7b0..da5aa1cdd64e1 100644 --- a/.ci/snapshotBwcVersions +++ b/.ci/snapshotBwcVersions @@ -1,5 +1,5 @@ BWC_VERSION: - - "7.17.13" - - "8.9.2" + - "7.17.14" + - "8.9.3" - "8.10.0" - "8.11.0" diff --git a/.ci/templates.t/pull-request-gradle-unix.yml 
b/.ci/templates.t/pull-request-gradle-unix.yml index 995dc9047b1fa..b4b5c48739097 100644 --- a/.ci/templates.t/pull-request-gradle-unix.yml +++ b/.ci/templates.t/pull-request-gradle-unix.yml @@ -20,6 +20,7 @@ cancel-builds-on-update: true excluded-regions: - ^docs/.* + - ^x-pack/docs/.* black-list-labels: - '>test-mute' builders: diff --git a/README.asciidoc b/README.asciidoc index 1bad9fbe9e30e..a8b3704887e5b 100644 --- a/README.asciidoc +++ b/README.asciidoc @@ -220,6 +220,12 @@ https://www.elastic.co/guide/en/elasticsearch/reference/current/index.html[elast For information about our documentation processes, see the xref:docs/README.asciidoc[docs README]. +[[examples]] +== Examples and guides + +The https://github.com/elastic/elasticsearch-labs[`elasticsearch-labs`] repo contains executable Python notebooks, sample apps, and resources to test out Elasticsearch for vector search, hybrid search and generative AI use cases. + + [[contribute]] == Contribute diff --git a/TESTING.asciidoc b/TESTING.asciidoc index 305e0f6371833..d33121a15dcf7 100644 --- a/TESTING.asciidoc +++ b/TESTING.asciidoc @@ -453,6 +453,11 @@ You can run a group of YAML test by using wildcards: --tests "org.elasticsearch.test.rest.ClientYamlTestSuiteIT.test {yaml=index/*/*}" --------------------------------------------------------------------------- +Note that if the selected test via the `--tests` filter is not a valid test, i.e., the YAML test +runner is not able to parse and load it, you might get an error message indicating that the test +was not found. In such cases, running the whole suite without using the `--tests` could show more +specific error messages about why the test runner is not able to parse or load a certain test. + The YAML REST tests support all the options provided by the randomized runner, plus the following: * `tests.rest.blacklist`: comma separated globs that identify tests that are diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java index e8c86967d14d8..e805958d04e78 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/compute/operator/EvalBenchmark.java @@ -19,6 +19,7 @@ import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.core.TimeValue; import org.elasticsearch.xpack.esql.evaluator.EvalMapper; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateTrunc; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Abs; import org.elasticsearch.xpack.esql.expression.function.scalar.multivalue.MvMin; @@ -27,7 +28,6 @@ import org.elasticsearch.xpack.ql.expression.FieldAttribute; import org.elasticsearch.xpack.ql.expression.Literal; import org.elasticsearch.xpack.ql.expression.predicate.operator.arithmetic.Add; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; import org.elasticsearch.xpack.ql.type.EsField; @@ -44,6 +44,7 @@ import org.openjdk.jmh.annotations.Warmup; import java.time.Duration; +import java.util.Arrays; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -133,9 +134,7 @@ private static FieldAttribute intField() { private static Layout layout(FieldAttribute... 
fields) { Layout.Builder layout = new Layout.Builder(); - for (FieldAttribute field : fields) { - layout.appendChannel(field.id()); - } + layout.append(Arrays.asList(fields)); return layout.build(); } diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java index a2f91f771290a..56002554cb140 100644 --- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java +++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/AllocationBenchmark.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexVersion; import org.openjdk.jmh.annotations.Benchmark; @@ -140,17 +141,17 @@ public void setUp() throws Exception { } RoutingTable routingTable = rb.build(); DiscoveryNodes.Builder nb = DiscoveryNodes.builder(); - Map transportVersions = new HashMap<>(); + Map compatibilityVersions = new HashMap<>(); for (int i = 1; i <= numNodes; i++) { String id = "node" + i; nb.add(Allocators.newNode(id, Collections.singletonMap("tag", "tag_" + (i % numTags)))); - transportVersions.put(id, TransportVersion.current()); + compatibilityVersions.put(id, new CompatibilityVersions(TransportVersion.current())); } initialClusterState = ClusterState.builder(ClusterName.DEFAULT) .metadata(metadata) .routingTable(routingTable) .nodes(nb) - .transportVersions(transportVersions) + .compatibilityVersions(compatibilityVersions) .build(); } diff --git a/branches.json b/branches.json new file mode 100644 index 0000000000000..3f6bcc88144da --- /dev/null +++ b/branches.json @@ -0,0 +1,17 @@ +{ + "notice": "This file is not maintained outside of the main branch and should only be used for tooling.", + "branches": [ + { + "branch": "main" + }, + { + "branch": "8.10" + }, + { + "branch": "8.9" + }, + { + "branch": "7.17" + } + ] +} diff --git a/build-tools-internal/src/main/resources/checkstyle_suppressions.xml b/build-tools-internal/src/main/resources/checkstyle_suppressions.xml index 6a12ee5b0403b..211faf973b772 100644 --- a/build-tools-internal/src/main/resources/checkstyle_suppressions.xml +++ b/build-tools-internal/src/main/resources/checkstyle_suppressions.xml @@ -30,8 +30,9 @@ - + + diff --git a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt index c22339be5332f..be3fea399a830 100644 --- a/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt +++ b/build-tools-internal/src/main/resources/forbidden/es-server-signatures.txt @@ -156,5 +156,5 @@ org.elasticsearch.cluster.service.ClusterService#submitUnbatchedStateUpdateTask( org.elasticsearch.cluster.ClusterStateTaskExecutor$TaskContext#success(java.util.function.Consumer) org.elasticsearch.cluster.ClusterStateTaskExecutor$TaskContext#success(java.util.function.Consumer, org.elasticsearch.cluster.ClusterStateAckListener) -@defaultMessage ClusterState#transportVersions are for internal use only. Use ClusterState#getMinTransportVersion or a different version. See TransportVersion javadocs for more info. 
-org.elasticsearch.cluster.ClusterState#transportVersions() +@defaultMessage ClusterState#compatibilityVersions are for internal use only. Use ClusterState#getMinVersions or a different version. See TransportVersion javadocs for more info. +org.elasticsearch.cluster.ClusterState#compatibilityVersions() diff --git a/build.gradle b/build.gradle index 3acd0fa6195eb..c33489c46b53c 100644 --- a/build.gradle +++ b/build.gradle @@ -72,9 +72,48 @@ tasks.register("updateCIBwcVersions") { file << " - \"$it\"\n" } } + + def writeBuildkiteList = { String outputFilePath, String pipelineTemplatePath, List versions -> + def outputFile = file(outputFilePath) + def pipelineTemplate = file(pipelineTemplatePath) + + def listString = "[" + versions.collect { "\"${it}\"" }.join(", ") + "]" + outputFile.text = "# This file is auto-generated. See ${pipelineTemplatePath}\n" + pipelineTemplate.text.replaceAll('\\$BWC_LIST', listString) + } + + def writeBuildkiteSteps = { String outputFilePath, String pipelineTemplatePath, String stepTemplatePath, List versions -> + def outputFile = file(outputFilePath) + def pipelineTemplate = file(pipelineTemplatePath) + def stepTemplate = file(stepTemplatePath) + + def steps = "" + versions.each { + steps += "\n" + stepTemplate.text.replaceAll('\\$BWC_VERSION', it.toString()) + } + + outputFile.text = "# This file is auto-generated. See ${pipelineTemplatePath}\n" + pipelineTemplate.text.replaceAll(' *\\$BWC_STEPS', steps) + } + doLast { writeVersions(file(".ci/bwcVersions"), BuildParams.bwcVersions.allIndexCompatible) writeVersions(file(".ci/snapshotBwcVersions"), BuildParams.bwcVersions.unreleasedIndexCompatible) + writeBuildkiteList( + ".buildkite/pipelines/intake.yml", + ".buildkite/pipelines/intake.template.yml", + BuildParams.bwcVersions.unreleasedIndexCompatible + ) + writeBuildkiteSteps( + ".buildkite/pipelines/periodic.yml", + ".buildkite/pipelines/periodic.template.yml", + ".buildkite/pipelines/periodic.bwc.template.yml", + BuildParams.bwcVersions.allIndexCompatible + ) + writeBuildkiteSteps( + ".buildkite/pipelines/periodic-packaging.yml", + ".buildkite/pipelines/periodic-packaging.template.yml", + ".buildkite/pipelines/periodic-packaging.bwc.template.yml", + BuildParams.bwcVersions.allIndexCompatible + ) } } diff --git a/catalog-info.yaml b/catalog-info.yaml index 2258cc11beeef..4eb88fd121985 100644 --- a/catalog-info.yaml +++ b/catalog-info.yaml @@ -98,45 +98,45 @@ spec: build_pull_requests: false publish_commit_status: false trigger_mode: none ---- -# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json -apiVersion: backstage.io/v1alpha1 -kind: Resource -metadata: - name: buildkite-pipeline-elasticsearch-periodic-trigger - description: Triggers periodic pipeline for all required branches - links: - - title: Pipeline - url: https://buildkite.com/elastic/elasticsearch-periodic-trigger -spec: - type: buildkite-pipeline - system: buildkite - owner: group:elasticsearch-team - implementation: - apiVersion: buildkite.elastic.dev/v1 - kind: Pipeline - metadata: - description: ":elasticsearch: Triggers periodic pipeline for all required branches" - name: elasticsearch / periodic / trigger - spec: - repository: elastic/elasticsearch - pipeline_file: .buildkite/pipelines/periodic.trigger.yml - branch_configuration: main - teams: - elasticsearch-team: {} - ml-core: {} - everyone: - access_level: BUILD_AND_READ - provider_settings: - build_branches: false - 
build_pull_requests: false - publish_commit_status: false - trigger_mode: none - schedules: - Periodically on main: - branch: main - cronline: "0 0,8,16 * * * America/New_York" - message: "Triggers pipelines 3x daily" +# --- +# # yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json +# apiVersion: backstage.io/v1alpha1 +# kind: Resource +# metadata: +# name: buildkite-pipeline-elasticsearch-periodic-trigger +# description: Triggers periodic pipelines for all required branches +# links: +# - title: Pipeline +# url: https://buildkite.com/elastic/elasticsearch-periodic-trigger +# spec: +# type: buildkite-pipeline +# system: buildkite +# owner: group:elasticsearch-team +# implementation: +# apiVersion: buildkite.elastic.dev/v1 +# kind: Pipeline +# metadata: +# description: ":elasticsearch: Triggers periodic pipelines for all required branches" +# name: elasticsearch / periodic / trigger +# spec: +# repository: elastic/elasticsearch +# pipeline_file: .buildkite/scripts/periodic.trigger.sh +# branch_configuration: main +# teams: +# elasticsearch-team: {} +# ml-core: {} +# everyone: +# access_level: BUILD_AND_READ +# provider_settings: +# build_branches: false +# build_pull_requests: false +# publish_commit_status: false +# trigger_mode: none +# schedules: +# Periodically on main: +# branch: main +# cronline: "0 0,8,16 * * * America/New_York" +# message: "Triggers pipelines 3x daily" --- # yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json apiVersion: backstage.io/v1alpha1 @@ -269,3 +269,37 @@ spec: branch: lucene_snapshot cronline: "0 9,12,15,18 * * * America/New_York" message: "Runs tests against lucene_snapshot branch several times per day" +--- +# yaml-language-server: $schema=https://gist.githubusercontent.com/elasticmachine/988b80dae436cafea07d9a4a460a011d/raw/e57ee3bed7a6f73077a3f55a38e76e40ec87a7cf/rre.schema.json +apiVersion: backstage.io/v1alpha1 +kind: Resource +metadata: + name: buildkite-pipeline-elasticsearch-ecs-dynamic-template-tests + description: Runs ECS dynamic template tests against main branch + links: + - title: Pipeline + url: https://buildkite.com/elastic/elasticsearch-ecs-dynamic-template-tests +spec: + type: buildkite-pipeline + system: buildkite + owner: group:elasticsearch-team + implementation: + apiVersion: buildkite.elastic.dev/v1 + kind: Pipeline + metadata: + description: ":elasticsearch: ECS dynamic template tests against main branch" + name: elasticsearch / ecs-dynamic-templates / tests + spec: + repository: elastic/elasticsearch + pipeline_file: .buildkite/pipelines/ecs-dynamic-template-tests.yml + provider_settings: + trigger_mode: none + teams: + elasticsearch-team: {} + ml-core: {} + everyone: + access_level: READ_ONLY + schedules: + Daily: + branch: main + cronline: "0 12 * * * America/New_York" diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java index 981e6de9c9414..13a72ee64c03f 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/StringStatsAggregationBuilder.java @@ -9,6 +9,7 @@ package 
org.elasticsearch.client.analytics; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.query.QueryRewriteContext; @@ -125,6 +126,6 @@ public boolean equals(Object obj) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/TopMetricsAggregationBuilder.java b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/TopMetricsAggregationBuilder.java index 5af136f75bb71..335f96615d607 100644 --- a/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/TopMetricsAggregationBuilder.java +++ b/client/rest-high-level/src/main/java/org/elasticsearch/client/analytics/TopMetricsAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.client.analytics; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.index.query.QueryRewriteContext; @@ -102,6 +103,6 @@ protected AggregationBuilder shallowCopy(Builder factoriesBuilder, Map> from {plugin_url}/{plugin_name}/{plugin_name}-{version}.zip. To verify the `.zip` file, use the {plugin_url}/{plugin_name}/{plugin_name}-{version}.zip.sha512[SHA hash] or {plugin_url}/{plugin_name}/{plugin_name}-{version}.zip.asc[ASC key]. -endif::[] [discrete] [id="{plugin_name}-remove"] diff --git a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc index b88c5de0b8185..162164e12872d 100644 --- a/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/stemmer-tokenfilter.asciidoc @@ -214,6 +214,9 @@ Norwegian (Nynorsk):: {lucene-analysis-docs}/no/NorwegianLightStemmer.html[*`light_nynorsk`*], {lucene-analysis-docs}/no/NorwegianMinimalStemmer.html[`minimal_nynorsk`] +Persian:: +{lucene-analysis-docs}/fa/PersianStemmer.html[*`persian`*] + Portuguese:: https://dl.acm.org/citation.cfm?id=1141523&dl=ACM&coll=DL&CFID=179095584&CFTOKEN=80067181[*`light_portuguese`*], pass:macros[http://www.inf.ufrgs.br/~buriol/papers/Orengo_CLEF07.pdf[`minimal_portuguese`\]], diff --git a/docs/reference/analysis/tokenfilters/trim-tokenfilter.asciidoc b/docs/reference/analysis/tokenfilters/trim-tokenfilter.asciidoc index b69f71b7bdc68..ea39d8d4a4c94 100644 --- a/docs/reference/analysis/tokenfilters/trim-tokenfilter.asciidoc +++ b/docs/reference/analysis/tokenfilters/trim-tokenfilter.asciidoc @@ -9,7 +9,7 @@ can change the length of a token, the `trim` filter does _not_ change a token's offsets. The `trim` filter uses Lucene's -https://lucene.apache.org/core/{lucene_version_path}/analyzers-common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html[TrimFilter]. +https://lucene.apache.org/core/{lucene_version_path}/analysis/common/org/apache/lucene/analysis/miscellaneous/TrimFilter.html[TrimFilter]. 
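An aside on the new Persian stemmer documented in the hunk above: once the filter ships, it can be exercised directly through the analyze API. A minimal sketch, assuming a cluster whose bundled Lucene includes `PersianStemmer`; the tokenizer choice and sample text are arbitrary:

[source,console]
----
GET /_analyze
{
  "tokenizer": "standard",
  "filter": [
    { "type": "stemmer", "language": "persian" }
  ],
  "text": "کتاب‌ها"
}
----

The response should show the tokens after the `persian` stemmer has been applied.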
[TIP] ==== @@ -110,4 +110,4 @@ PUT trim_example } } } ----- \ No newline at end of file +---- diff --git a/docs/reference/cluster.asciidoc b/docs/reference/cluster.asciidoc index beef3f241d054..3de386d3288c6 100644 --- a/docs/reference/cluster.asciidoc +++ b/docs/reference/cluster.asciidoc @@ -24,7 +24,7 @@ one of the following: `coordinating_only:true`, which respectively add to the subset all master-eligible nodes, all data nodes, all ingest nodes, all voting-only nodes, all machine learning nodes, and all coordinating-only nodes. -* `master:false`, `data:false`, `ingest:false`, `voting_only:true`, `ml:false`, +* `master:false`, `data:false`, `ingest:false`, `voting_only:false`, `ml:false`, or `coordinating_only:false`, which respectively remove from the subset all master-eligible nodes, all data nodes, all ingest nodes, all voting-only nodes, all machine learning nodes, and all coordinating-only nodes. diff --git a/docs/reference/cluster/remote-info.asciidoc b/docs/reference/cluster/remote-info.asciidoc index 0c1cb1f3adbb7..240aab04e82d1 100644 --- a/docs/reference/cluster/remote-info.asciidoc +++ b/docs/reference/cluster/remote-info.asciidoc @@ -69,7 +69,7 @@ Refer to <>. mode is configured. `cluster_credentials`:: -// TODO: fix the link to new page of API key based remote clusters beta:[] -This field presents and has value of `::es_redacted::` only when the remote cluster -is configured with the API key based model. Otherwise, the field is not present. +This field presents and has value of `::es_redacted::` only when the +<>. +Otherwise, the field is not present. diff --git a/docs/reference/data-streams/set-up-tsds.asciidoc b/docs/reference/data-streams/set-up-tsds.asciidoc index 3c15011871f89..a98e3c7302424 100644 --- a/docs/reference/data-streams/set-up-tsds.asciidoc +++ b/docs/reference/data-streams/set-up-tsds.asciidoc @@ -177,6 +177,7 @@ Optionally, the index settings component template for a TSDS can include: * Your lifecycle policy in the `index.lifecycle.name` index setting. * The <> index setting. +* The <> index setting. * Other index settings, such as <>, for your TSDS's backing indices. diff --git a/docs/reference/data-streams/tsds-index-settings.asciidoc b/docs/reference/data-streams/tsds-index-settings.asciidoc index fa5c9b8cd821f..8091163ffe883 100644 --- a/docs/reference/data-streams/tsds-index-settings.asciidoc +++ b/docs/reference/data-streams/tsds-index-settings.asciidoc @@ -33,6 +33,15 @@ days). Only indices with an `index.mode` of `time_series` support this setting. For more information, refer to <>. Additionally this setting can not be less than `time_series.poll_interval` cluster setting. +[[index-look-back-time]] +`index.look_back_time`:: +(<<_static_index_settings,Static>>, <>) +Interval used to calculate the `index.time_series.start_time` for a TSDS's first +backing index when a tsdb data stream is created. Defaults to `2h` (2 hours). +Accepts `1m` (one minute) to `7d` (seven days). Only indices with an `index.mode` +of `time_series` support this setting. For more information, +refer to <>. + [[index-routing-path]] `index.routing_path`:: (<<_static_index_settings,Static>>, string or array of strings) Plain `keyword` fields used to route documents in a TSDS to index shards. 
Supports wildcards diff --git a/docs/reference/data-streams/tsds.asciidoc b/docs/reference/data-streams/tsds.asciidoc index 3f49a7ab8c700..d6e9ea08f0892 100644 --- a/docs/reference/data-streams/tsds.asciidoc +++ b/docs/reference/data-streams/tsds.asciidoc @@ -253,6 +253,22 @@ value borders the `index.time_series.start_time` for the new write index. This ensures the `@timestamp` ranges for neighboring backing indices always border but never overlap. +[discrete] +[[tsds-look-back-time]] +==== Look-back time + +Use the <> index setting to +configure how far in the past you can add documents to an index. When you +create a data stream for a TSDS, {es} calculates the index's +`index.time_series.start_time` value as: + +`now - index.look_back_time` + +This setting is only used when a data stream gets created and controls +the `index.time_series.start_time` index setting of the first backing index. +Configuring this index setting can be useful to accept documents with `@timestamp` +field values that are older than 2 hours (the `index.look_back_time` default). + [discrete] [[tsds-accepted-time-range]] ==== Accepted time range for adding data diff --git a/docs/reference/docs/reindex.asciidoc b/docs/reference/docs/reindex.asciidoc index 5f8b1fcc7f0c3..97d77cf91376c 100644 --- a/docs/reference/docs/reindex.asciidoc +++ b/docs/reference/docs/reindex.asciidoc @@ -123,7 +123,7 @@ conflict. IMPORTANT: Because data streams are <>, any reindex request to a destination data stream must have an `op_type` -of`create`. A reindex can only add new documents to a destination data stream. +of `create`. A reindex can only add new documents to a destination data stream. It cannot update existing documents in a destination data stream. By default, version conflicts abort the `_reindex` process. diff --git a/docs/reference/esql/esql-functions.asciidoc b/docs/reference/esql/esql-functions.asciidoc index 5fa122814b136..1e29d312906ae 100644 --- a/docs/reference/esql/esql-functions.asciidoc +++ b/docs/reference/esql/esql-functions.asciidoc @@ -32,10 +32,10 @@ these functions: * <> * <> * <> +* <> * <> * <> * <> -* <> * <> * <> * <> @@ -47,7 +47,9 @@ these functions: * <> * <> * <> +* <> * <> +* <> * <> * <> * <> @@ -93,10 +95,10 @@ include::functions/is_finite.asciidoc[] include::functions/is_infinite.asciidoc[] include::functions/is_nan.asciidoc[] include::functions/least.asciidoc[] +include::functions/left.asciidoc[] include::functions/length.asciidoc[] include::functions/log10.asciidoc[] include::functions/ltrim.asciidoc[] -include::functions/rtrim.asciidoc[] include::functions/mv_avg.asciidoc[] include::functions/mv_concat.asciidoc[] include::functions/mv_count.asciidoc[] @@ -108,7 +110,9 @@ include::functions/mv_sum.asciidoc[] include::functions/now.asciidoc[] include::functions/pi.asciidoc[] include::functions/pow.asciidoc[] +include::functions/right.asciidoc[] include::functions/round.asciidoc[] +include::functions/rtrim.asciidoc[] include::functions/sin.asciidoc[] include::functions/sinh.asciidoc[] include::functions/split.asciidoc[] diff --git a/docs/reference/esql/functions/left.asciidoc b/docs/reference/esql/functions/left.asciidoc new file mode 100644 index 0000000000000..91a2d544b279f --- /dev/null +++ b/docs/reference/esql/functions/left.asciidoc @@ -0,0 +1,19 @@ +[[esql-left]] +=== `LEFT` +[.text-center] +image::esql/functions/signature/left.svg[Embedded,opts=inline] + +Return the substring that extracts 'length' chars from the 'string' starting from the left. 
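For readers who want to try the new `LEFT` function beyond the included spec snippets, here is a hedged sketch using the `_query` endpoint that appears elsewhere in this diff; the `employees` index and its `last_name` keyword field are assumptions, not part of the patch:

[source,console]
----
POST /_query?format=txt
{
  "query": """
    FROM employees
    | KEEP last_name
    | EVAL left = LEFT(last_name, 3)
    | LIMIT 5
  """
}
----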
+ +[source.merge.styled,esql] +---- +include::{esql-specs}/string.csv-spec[tag=left] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/string.csv-spec[tag=left-result] +|=== + +Supported types: + +include::types/left.asciidoc[] diff --git a/docs/reference/esql/functions/right.asciidoc b/docs/reference/esql/functions/right.asciidoc new file mode 100644 index 0000000000000..ec55a2affbe40 --- /dev/null +++ b/docs/reference/esql/functions/right.asciidoc @@ -0,0 +1,19 @@ +[[esql-right]] +=== `RIGHT` +[.text-center] +image::esql/functions/signature/right.svg[Embedded,opts=inline] + +Return the substring that extracts 'length' chars from the 'string' starting from the right. + +[source.merge.styled,esql] +---- +include::{esql-specs}/string.csv-spec[tag=right] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/string.csv-spec[tag=right-result] +|=== + +Supported types: + +include::types/right.asciidoc[] diff --git a/docs/reference/esql/functions/signature/ceil.svg b/docs/reference/esql/functions/signature/ceil.svg index baff44ba0cb70..bb07117e56630 100644 --- a/docs/reference/esql/functions/signature/ceil.svg +++ b/docs/reference/esql/functions/signature/ceil.svg @@ -1 +1 @@ -CEIL(arg1) \ No newline at end of file +CEIL(n) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/left.svg b/docs/reference/esql/functions/signature/left.svg new file mode 100644 index 0000000000000..ec14bf8c72131 --- /dev/null +++ b/docs/reference/esql/functions/signature/left.svg @@ -0,0 +1 @@ +LEFT(string,length) \ No newline at end of file diff --git a/docs/reference/esql/functions/signature/right.svg b/docs/reference/esql/functions/signature/right.svg new file mode 100644 index 0000000000000..0afa5dbf01f16 --- /dev/null +++ b/docs/reference/esql/functions/signature/right.svg @@ -0,0 +1 @@ +RIGHT(string,length) \ No newline at end of file diff --git a/docs/reference/esql/functions/types/ceil.asciidoc b/docs/reference/esql/functions/types/ceil.asciidoc index f1831429aa95c..54341360fed3f 100644 --- a/docs/reference/esql/functions/types/ceil.asciidoc +++ b/docs/reference/esql/functions/types/ceil.asciidoc @@ -1,6 +1,6 @@ [%header.monospaced.styled,format=dsv,separator=|] |=== -arg1 | result +n | result double | double integer | integer long | long diff --git a/docs/reference/esql/functions/types/left.asciidoc b/docs/reference/esql/functions/types/left.asciidoc new file mode 100644 index 0000000000000..c30a055f3be49 --- /dev/null +++ b/docs/reference/esql/functions/types/left.asciidoc @@ -0,0 +1,5 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +string | length | result +keyword | integer | keyword +|=== diff --git a/docs/reference/esql/functions/types/right.asciidoc b/docs/reference/esql/functions/types/right.asciidoc new file mode 100644 index 0000000000000..c30a055f3be49 --- /dev/null +++ b/docs/reference/esql/functions/types/right.asciidoc @@ -0,0 +1,5 @@ +[%header.monospaced.styled,format=dsv,separator=|] +|=== +string | length | result +keyword | integer | keyword +|=== diff --git a/docs/reference/esql/index.asciidoc b/docs/reference/esql/index.asciidoc index fd126cf3b3c9a..7e22eae3ff1b5 100644 --- a/docs/reference/esql/index.asciidoc +++ b/docs/reference/esql/index.asciidoc @@ -95,9 +95,8 @@ POST /_query?format=txt [discrete] ==== {kib} -{esql} can be used in Discover to explore a data set, and in Lens to visualize it. -First, enable the `enableTextBased` setting in *Advanced Settings*. 
Next, in -Discover or Lens, from the data view dropdown, select *{esql}*. +Use {esql} in Discover to explore a data set. From the data view dropdown, +select *Try {esql}* to get started. NOTE: {esql} queries in Discover and Lens are subject to the time range selected with the time filter. @@ -136,6 +135,8 @@ include::aggregation-functions.asciidoc[] include::multivalued-fields.asciidoc[] +include::metadata-fields.asciidoc[] + include::task-management.asciidoc[] :esql-tests!: diff --git a/docs/reference/esql/metadata-fields.asciidoc b/docs/reference/esql/metadata-fields.asciidoc new file mode 100644 index 0000000000000..69c9c0c04dd7b --- /dev/null +++ b/docs/reference/esql/metadata-fields.asciidoc @@ -0,0 +1,55 @@ +[[esql-metadata-fields]] +== {esql} metadata fields + +++++ +Metadata fields +++++ + +{esql} can access <>. The currently +supported ones are: + + * <>: the index to which the document belongs. + The field is of the type <>. + + * <>: the source document's ID. The field is of the + type <>. + + * `_version`: the source document's version. The field is of the type + <>. + +To enable the access to these fields, the <> source command needs +to be provided with a dedicated directive: + +[source,esql] +---- +FROM index [METADATA _index, _id] +---- + +Metadata fields are only available if the source of the data is an index. +Consequently, `FROM` is the only source commands that supports the `METADATA` +directive. + +Once enabled, the fields are then available to subsequent processing commands, just +like the other index fields: + +[source.merge.styled,esql] +---- +include::{esql-specs}/metadata-ignoreCsvTests.csv-spec[tag=multipleIndices] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/metadata-ignoreCsvTests.csv-spec[tag=multipleIndices-result] +|=== + +Also, similar to the index fields, once an aggregation is performed, a +metadata field will no longer be accessible to subsequent commands, unless +used as grouping field: + +[source.merge.styled,esql] +---- +include::{esql-specs}/metadata-ignoreCsvTests.csv-spec[tag=metaIndexInAggs] +---- +[%header.monospaced.styled,format=dsv,separator=|] +|=== +include::{esql-specs}/metadata-ignoreCsvTests.csv-spec[tag=metaIndexInAggs-result] +|=== diff --git a/docs/reference/esql/source-commands/from.asciidoc b/docs/reference/esql/source-commands/from.asciidoc index 64bd6f8c8dd88..69ab152de9cd8 100644 --- a/docs/reference/esql/source-commands/from.asciidoc +++ b/docs/reference/esql/source-commands/from.asciidoc @@ -27,3 +27,10 @@ or aliases: ---- FROM employees-00001,employees-* ---- + +Use the `METADATA` directive to enable <>: + +[source,esql] +---- +FROM employees [METADATA _id] +---- diff --git a/docs/reference/graph/explore.asciidoc b/docs/reference/graph/explore.asciidoc index 1897aea3ad2fa..34ac367125ade 100644 --- a/docs/reference/graph/explore.asciidoc +++ b/docs/reference/graph/explore.asciidoc @@ -387,7 +387,7 @@ To spider out, you need to specify two things: * The set of vertices you already know about that you want to exclude from the results of the spidering operation. -You specify this information using `include`and `exclude` clauses. For example, +You specify this information using `include` and `exclude` clauses. For example, the following request starts with the product `1854873` and spiders out to find additional search terms associated with that product. The terms "midi", "midi keyboard", and "synth" are excluded from the results. 
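Tying the new `METADATA` directive from the pages above together: a sketch of an end-to-end request over the `_query` endpoint, again assuming a hypothetical `employees` index with a `last_name` field:

[source,console]
----
POST /_query?format=txt
{
  "query": """
    FROM employees [METADATA _index, _id]
    | KEEP _index, _id, last_name
    | LIMIT 3
  """
}
----

As the new page notes, `_index` and `_id` then behave like ordinary fields in later commands, until an aggregation drops them (unless they are used for grouping).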
diff --git a/docs/reference/health/health.asciidoc b/docs/reference/health/health.asciidoc index 8f9b090f4e101..4baed1fba5edd 100644 --- a/docs/reference/health/health.asciidoc +++ b/docs/reference/health/health.asciidoc @@ -413,7 +413,7 @@ watermark threshold>>. `unhealthy_policies`:: (map) A detailed view on the policies that are considered unhealthy due to having - several consecutive unssuccesful invocations. + several consecutive unsuccessful invocations. The `count` key represents the number of unhealthy policies (int). The `invocations_since_last_success` key will report a map where the unhealthy policy name is the key and it's corresponding number of failed invocations is the value. diff --git a/docs/reference/how-to/knn-search.asciidoc b/docs/reference/how-to/knn-search.asciidoc index 10dd57efc3ced..0501fd5f59d01 100644 --- a/docs/reference/how-to/knn-search.asciidoc +++ b/docs/reference/how-to/knn-search.asciidoc @@ -21,8 +21,8 @@ options. The `cosine` option accepts any float vector and computes the cosine similarity. While this is convenient for testing, it's not the most efficient approach. Instead, we recommend using the `dot_product` option to compute the -similarity. To use `dot_product`, all vectors need to be normalized in advance -to have length 1. The `dot_product` option is significantly faster, since it +similarity. When using `dot_product`, all vectors are normalized during index to have +a magnitude of 1. The `dot_product` option is significantly faster, since it avoids performing extra vector length computations during the search. [discrete] diff --git a/docs/reference/ilm/error-handling.asciidoc b/docs/reference/ilm/error-handling.asciidoc index da76e475c9d14..ad974c6f1c2ed 100644 --- a/docs/reference/ilm/error-handling.asciidoc +++ b/docs/reference/ilm/error-handling.asciidoc @@ -156,7 +156,7 @@ You can use the <> to monitor the [discrete] ==== How `min_age` is calculated -When setting up an <> or <>, be aware that`min_age` can be relative to either the rollover time or the index creation time. +When setting up an <> or <>, be aware that `min_age` can be relative to either the rollover time or the index creation time. If you use <>, `min_age` is calculated relative to the time the index was rolled over. This is because the <> generates a new index. The `creation_date` of the new index (retrievable via <>) is used in the calculation. If you do not use rollover in the {ilm-init} policy, `min_age` is calculated relative to the `creation_date` of the original index. diff --git a/docs/reference/ingest/processors/redact.asciidoc b/docs/reference/ingest/processors/redact.asciidoc index 91a1963af76c0..2004e48c2ed78 100644 --- a/docs/reference/ingest/processors/redact.asciidoc +++ b/docs/reference/ingest/processors/redact.asciidoc @@ -40,6 +40,7 @@ patterns. Legacy Grok patterns are not supported. | `suffix` | no | > | End a redacted section with this token | `ignore_missing` | no | `true` | If `true` and `field` does not exist or is `null`, the processor quietly exits without modifying the document include::common-options.asciidoc[] +| `skip_if_unlicensed` | no | `false` | If `true` and the current license does not support running redact processors, then the processor quietly exits without modifying the document |====== In this example the predefined `IP` Grok pattern is used to match @@ -235,3 +236,18 @@ The watchdog interrupts expressions that take too long to execute. When interrupted, the Redact processor fails with an error. 
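The new `skip_if_unlicensed` option documented above is easiest to see with a pipeline simulation. A sketch that mirrors the page's predefined `IP` pattern example; with `skip_if_unlicensed: true`, an insufficient license makes the processor a no-op instead of an error:

[source,console]
----
POST /_ingest/pipeline/_simulate
{
  "pipeline": {
    "processors": [
      {
        "redact": {
          "field": "message",
          "patterns": ["%{IP:client}"],
          "skip_if_unlicensed": true
        }
      }
    ]
  },
  "docs": [
    { "_source": { "message": "55.3.244.1 GET /index.html" } }
  ]
}
----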
The same <> that control the Grok Watchdog timeout also apply to the Redact processor. + +[[redact-licensing]] +==== Licensing + +The `redact` processor is a commercial feature that requires an +appropriate license. For more information, refer to +https://www.elastic.co/subscriptions. + +The `skip_if_unlicensed` option can be set on a redact processor to +control behavior when the cluster's license is not sufficient to run +such a processor. `skip_if_unlicensed` defaults to `false`, and the +redact processor will throw an exception if the cluster's license is +not sufficient. If you set the `skip_if_unlicensed` option to `true`, +however, then the redact processor not throw an exception (it will do +nothing at all) in the case of an insufficient license. diff --git a/docs/reference/ingest/processors/set.asciidoc b/docs/reference/ingest/processors/set.asciidoc index 9543e94fa14cf..7db8c62071682 100644 --- a/docs/reference/ingest/processors/set.asciidoc +++ b/docs/reference/ingest/processors/set.asciidoc @@ -16,7 +16,7 @@ its value will be replaced with the provided one. | `value` | yes* | - | The value to be set for the field. Supports <>. May specify only one of `value` or `copy_from`. | `copy_from` | no | - | The origin field which will be copied to `field`, cannot set `value` simultaneously. Supported data types are `boolean`, `number`, `array`, `object`, `string`, `date`, etc. | `override` | no | `true` | If `true` processor will update fields with pre-existing non-null-valued field. When set to `false`, such fields will not be touched. -| `ignore_empty_value` | no | `false` | If `true` and `value` is a <> that evaluates to `null` or the empty string, the processor quietly exits without modifying the document +| `ignore_empty_value` | no | `false` | If `true` and used in combination with `value` which is a <> that evaluates to `null` or an empty string, the processor quietly exits without modifying the document. Similarly, if used in combination with `copy_from` it will quietly exit if the field does not exist or its value evaluates to `null` or an empty string. | `media_type` | no | `application/json` | The media type for encoding `value`. Applies only when `value` is a <>. Must be one of `application/json`, `text/plain`, or `application/x-www-form-urlencoded`. include::common-options.asciidoc[] |====== diff --git a/docs/reference/mapping/types/dense-vector.asciidoc b/docs/reference/mapping/types/dense-vector.asciidoc index 04f84d9003fea..8098a4b0b3299 100644 --- a/docs/reference/mapping/types/dense-vector.asciidoc +++ b/docs/reference/mapping/types/dense-vector.asciidoc @@ -62,6 +62,9 @@ In many cases, a brute-force kNN search is not efficient enough. For this reason, the `dense_vector` type supports indexing vectors into a specialized data structure to support fast kNN retrieval through the <> in the search API +Unmapped array fields of float elements with size between 128 and 2048 are dynamically mapped as `dense_vector` with a default similariy of `cosine`. +You can override the default similarity by explicitly mapping the field as `dense_vector` with the desired similarity. + Indexing is enabled by default for dense vector fields. When indexing is enabled, you can define the vector similarity to use in kNN search: @@ -128,8 +131,9 @@ trade off of lower precision. Vectors using `byte` require dimensions with integer values between -128 to 127, inclusive for both indexing and searching. `dims`:: -(Required, integer) -Number of vector dimensions. Can't exceed `2048`. 
+(Optional, integer) +Number of vector dimensions. Can't exceed `2048`. If `dims` is not specified, +it will be set to the length of the first vector added to the field. `index`:: (Optional, Boolean) @@ -159,9 +163,9 @@ Computes the dot product of two vectors. This option provides an optimized way to perform cosine similarity. The constraints and computed score are defined by `element_type`. + -When `element_type` is `float`, all vectors must be unit length, including both -document and query vectors. The document `_score` is computed as -`(1 + dot_product(query, vector)) / 2`. +When `element_type` is `float`, all vectors are automatically converted to unit length, including both +document and query vectors. Consequently, `dot_product` does not allow vectors with a zero magnitude. +The document `_score` is computed as `(1 + dot_product(query, vector)) / 2`. + When `element_type` is `byte`, all vectors must have the same length including both document and query vectors or results will be inaccurate. @@ -171,9 +175,9 @@ where `dims` is the number of dimensions per vector. `cosine`::: Computes the cosine similarity. Note that the most efficient way to perform -cosine similarity is to normalize all vectors to unit length, and instead use +cosine similarity is to have all vectors normalized to unit length, and instead use `dot_product`. You should only use `cosine` if you need to preserve the -original vectors and cannot normalize them in advance. The document `_score` +original vectors and cannot allow Elasticsearch to normalize them. The document `_score` is computed as `(1 + cosine(query, vector)) / 2`. The `cosine` similarity does not allow vectors with zero magnitude, since cosine is not defined in this case. diff --git a/docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc b/docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc index 75aa2676e7ed7..bad4ab93676bb 100644 --- a/docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc +++ b/docs/reference/migration/migrate_8_0/cluster-node-setting-changes.asciidoc @@ -230,7 +230,7 @@ Remove the `http.content_type.required` setting from `elasticsearch.yml`. Specif [%collapsible] ==== *Details* + -The `http.tcp_no_delay` setting was deprecated in 7.x and has been removed in 8.0. Use`http.tcp.no_delay` instead. +The `http.tcp_no_delay` setting was deprecated in 7.x and has been removed in 8.0. Use `http.tcp.no_delay` instead. *Impact* + Replace the `http.tcp_no_delay` setting with `http.tcp.no_delay`. @@ -246,7 +246,7 @@ The `network.tcp.connect_timeout` setting was deprecated in 7.x and has been rem was a fallback setting for `transport.connect_timeout`. *Impact* + -Remove the`network.tcp.connect_timeout` setting. +Remove the `network.tcp.connect_timeout` setting. Use the `transport.connect_timeout` setting to change the default connection timeout for client connections. 
Specifying `network.tcp.connect_timeout` in `elasticsearch.yml` will result in an diff --git a/docs/reference/migration/migrate_8_0/java-api-changes.asciidoc b/docs/reference/migration/migrate_8_0/java-api-changes.asciidoc index 2d1cc1be7f3a7..22e1caf1bf5e4 100644 --- a/docs/reference/migration/migrate_8_0/java-api-changes.asciidoc +++ b/docs/reference/migration/migrate_8_0/java-api-changes.asciidoc @@ -22,7 +22,7 @@ Update your workflow and applications to use the `ilm` package in place of To create `Fuzziness` instances, use the `fromString` and `fromEdits` method instead of the `build` method that used to accept both Strings and numeric values. Several fuzziness setters on query builders (e.g. -MatchQueryBuilder#fuzziness) now accept only a `Fuzziness`instance instead of +MatchQueryBuilder#fuzziness) now accept only a `Fuzziness` instance instead of an Object. Fuzziness used to be lenient when it comes to parsing arbitrary numeric values diff --git a/docs/reference/ml/trained-models/apis/stop-trained-model-deployment.asciidoc b/docs/reference/ml/trained-models/apis/stop-trained-model-deployment.asciidoc index ad3ef88c7d4b6..335d76ffcd56a 100644 --- a/docs/reference/ml/trained-models/apis/stop-trained-model-deployment.asciidoc +++ b/docs/reference/ml/trained-models/apis/stop-trained-model-deployment.asciidoc @@ -40,10 +40,13 @@ include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=deployment-id] include::{es-repo-dir}/ml/ml-shared.asciidoc[tag=allow-no-match-deployments] `force`:: -(Optional, Boolean) If true, the deployment is stopped even if it or one of its -model aliases is referenced by ingest pipelines. You can't use these pipelines +(Optional, Boolean) If true, the deployment is stopped even if it or one of its +model aliases is referenced by ingest pipelines. You can't use these pipelines until you restart the model deployment. +`finish_pending_work`:: +(Optional, Boolean) If true, the deployment is stopped after any queued work is completed. Defaults to `false`. + //// [role="child_attributes"] [[stop-trained-model-deployment-results]] @@ -63,4 +66,4 @@ The following example stops the `my_model_for_search` deployment: [source,console] -------------------------------------------------- POST _ml/trained_models/my_model_for_search/deployment/_stop --------------------------------------------------- \ No newline at end of file +-------------------------------------------------- diff --git a/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc b/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc index ef22de65d53d8..29fe2b0aaf35e 100644 --- a/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-api-key.asciidoc @@ -48,6 +48,8 @@ To add a remote cluster using API key authentication: . <> . <> +If you run into any issues, refer to <>. + [[remote-clusters-prerequisites-api-key]] ==== Prerequisites @@ -63,13 +65,20 @@ information, refer to https://www.elastic.co/subscriptions. ===== On the remote cluster -. Enable the remote cluster server port on every node of the remote cluster by -setting `remote_cluster_server.enabled` to `true` in `elasticsearch.yml`. The -port number defaults to `9443` and can be configured with the -`remote_cluster.port` setting. Refer to <>. - -. Next, generate a CA and a server certificate/key pair. On one of the nodes -of the remote cluster, from the directory where {es} has been installed: +// tag::remote-cluster-steps[] +. 
Enable the remote cluster server on every node of the remote cluster. In +`elasticsearch.yml`: +.. Set <> to +`true`. +.. Configure the bind and publish address for remote cluster server traffic, for +example using <>. Without +configuring the address, remote cluster traffic may be bound to the local +interface, and remote clusters running on other machines can't connect. +.. Optionally, configure the remote server port using +<> (defaults to `9443`). +. Next, generate a certificate authority (CA) and a server certificate/key pair. +On one of the nodes of the remote cluster, from the directory where {es} has +been installed: .. Create a CA, if you don't have a CA already: + @@ -137,16 +146,18 @@ When prompted, enter the `CERT_PASSWORD` from the earlier step. . Restart the remote cluster. -. On the remote cluster, generate a cross-cluster API key using the +. On the remote cluster, generate a cross-cluster API key that provides access +to the indices you want to use for {ccs} or {ccr}. You can use the <> API or -{kibana-ref}/api-keys.html[Kibana]. Grant the key the required access for {ccs} -or {ccr}. +{kibana-ref}/api-keys.html[Kibana]. . Copy the encoded key (`encoded` in the response) to a safe location. You will need it to connect to the remote cluster later. +// end::remote-cluster-steps[] ===== On the local cluster +// tag::local-cluster-steps[] . On every node of the local cluster: .. Copy the `ca.crt` file generated on the remote cluster earlier into the @@ -159,6 +170,7 @@ need it to connect to the remote cluster later. xpack.security.remote_cluster_client.ssl.enabled: true xpack.security.remote_cluster_client.ssl.certificate_authorities: [ "remote-cluster-ca.crt" ] ---- +// end::local-cluster-steps[] .. Add the cross-cluster API key, created on the remote cluster earlier, to the keystore: diff --git a/docs/reference/modules/cluster/remote-clusters-cert.asciidoc b/docs/reference/modules/cluster/remote-clusters-cert.asciidoc index 60d6f1a186175..36dbde331f484 100644 --- a/docs/reference/modules/cluster/remote-clusters-cert.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-cert.asciidoc @@ -8,6 +8,8 @@ To add a remote cluster using TLS certificate authentication: . <> . <> +If you run into any issues, refer to <>. + [[remote-clusters-prerequisites-cert]] ==== Prerequisites diff --git a/docs/reference/modules/cluster/remote-clusters-migration.asciidoc b/docs/reference/modules/cluster/remote-clusters-migration.asciidoc new file mode 100644 index 0000000000000..9db7c4a0257ad --- /dev/null +++ b/docs/reference/modules/cluster/remote-clusters-migration.asciidoc @@ -0,0 +1,266 @@ +[[remote-clusters-migrate]] +=== Migrate remote clusters from certificate to API key authentication + +++++ +Migrate from certificate to API key authentication +++++ + +The API key based security model for remote clusters offers administrators more +fine-grained access controls compared to the TLS certificate based security +model. For that reason, you may want to migrate from the certificate based +security model to the API key based model. + +While it is possible to migrate by defining a new remote cluster connection, +using a new alias, this has several downsides: + +- For {ccr}, it's not possible to change the leader cluster alias for existing +tasks. As a result, with a new remote cluster, follower indices would need to be +re-created from scratch. +- For {ccs}, transform and anomaly detection jobs do allow updating the remote +cluster alias. 
However, if the job was created with wildcards, for example +`*:source_index`, and `superuser`, adding a new remote cluster will cause the +job to do double the amount of work and potentially skew results with +duplications. + +For these reasons, you may prefer to migrate a remote cluster in-place, by +following these steps: + +. <> +. <> +. <> +. <> +. <> +. <> + +If you run into any issues, refer to <>. + +[[remote-clusters-migration-prerequisites]] +==== Prerequisites + +* The nodes of the local and remote clusters must be on version 8.10 or later. +* The local and remote clusters must have an appropriate license. For more +information, refer to https://www.elastic.co/subscriptions. + +[[remote-clusters-migration-remote-cluster]] +==== Reconfigure the remote cluster and generate a cross-cluster API key + +On the remote cluster: + +include::remote-clusters-api-key.asciidoc[tag=remote-cluster-steps] + +[[remote-clusters-migration-stop]] +==== Stop cross-cluster operations + +On the local cluster, stop any persistent tasks that refer to the remote +cluster: + +* Use the <> API to stop any transforms. +* Use the <> API to close any anomaly detection jobs. +* Use the <> API to pause any auto-follow {ccr}. +* Use the <> API to pause any manual {ccr} or existing +indices that were created from the auto-follow pattern. + +[[remote-clusters-migration-reconnect]] +==== Reconnect to the remote cluster + +On the local cluster: + +. Enhance any roles used by local cluster users with the required +<> for {ccr} and {ccs}. +Refer to <>. Note: + +** You only need to assign additional `remote_indices` privileges to existing +roles used for cross-cluster operations. You should be able to copy these +privileges from the original roles on the remote cluster, where they are defined +under the certification based security model. +** The roles on the local cluster can't exceed the `access` privilege granted by +the cross-cluster API key. Any extra local privileges will be suppressed by the +cross-cluster API key's privileges. +** No update is needed if the {ccr} or {ccs} tasks have been configured with a +`superuser` role. The `superuser` role is automatically updated to allow access +to all remote indices. +** Tasks that are run as regular users with named roles are immediately updated +with the new privileges. A task will load a new definition the next time it +runs. +** You need to restart tasks that are run using an API key (done in a later +step). + +. If you've dynamically configured the remote cluster (via the cluster settings +API): + +.. Retrieve the current remote cluster configuration, and store it in a safe +place. You may need it later in case you need to +<>. Use the cluster settings API: ++ +[source,console] +---- +GET /_cluster/settings?filter_path=persistent.cluster.remote +---- + +.. Remove the existing remote cluster definition by setting the remote cluster +settings to `null`. + +. If you've statically configured the remote cluster (via `elasticsearch.yml`), +copy the `cluster.remote` settings from `elasticsearch.yml`, and store them +in a safe place. You may need them later in case you need to +<>. + + +include::remote-clusters-api-key.asciidoc[tag=local-cluster-steps] + +.. Add the cross-cluster API key, created on the remote cluster earlier, to the +keystore: ++ +[source,sh] +---- +./bin/elasticsearch-keystore add cluster.remote.ALIAS.credentials +---- ++ +Replace `ALIAS` with the same alias that was used for cross-cluster operations +before the migration. 
When prompted, enter the encoded cross-cluster API key +created on the remote cluster earlier. + +. If you've dynamically configured the remote cluster (via the cluster settings +API): + +.. Restart the local cluster to load changes to the keystore. + +.. Re-add the remote cluster. Use the same remote cluster alias, and change the +transport port into the remote cluster port. For example: ++ +[source,console] +---- +PUT /_cluster/settings +{ + "persistent" : { + "cluster" : { + "remote" : { + "my_remote" : { <1> + "mode": "proxy", + "proxy_address": "my.remote.cluster.com:9443" <2> + } + } + } + } +} +---- +// TEST[skip:TODO] +<1> The remote cluster alias. Use the same alias that was used before the +migration. +<2> The remote cluster address with the remote cluster port, which defaults to +`9443`. + +. If you've statically configured the remote cluster (via `elasticsearch.yml`): + +.. Update the `cluster.remote` settings in `elasticsearch.yml` on each node of +the local cluster. Change the port into the remote cluster port, which defaults +to `9443`. + +.. Restart the local cluster to load changes to the keystore and settings. + +. Use the <> to verify that the +local cluster has successfully connected to the remote cluster: ++ +[source,console] +---- +GET /_remote/info +---- +// TEST[skip:TODO] ++ +The API response should indicate that the local cluster has connected to the +remote cluster: ++ +[source,console-result] +---- +{ + "my_remote": { + "connected": true, <1> + "mode": "proxy", + "proxy_address": "my.remote.cluster.com:9443", + "server_name": "", + "num_proxy_sockets_connected": 0, + "max_proxy_socket_connections": 18, + "initial_connect_timeout": "30s", + "skip_unavailable": false, + "cluster_credentials": "::es_redacted::" <2> + } +} +---- +// TEST[skip:TODO] +<1> The remote cluster is connected. +<2> If present, indicates the remote cluster has connected using API key +authentication. + +[[remote-clusters-migration-resume]] +==== Resume cross-cluster operations + +Resume any persistent tasks that you stopped earlier. Tasks should be restarted +by the same user or API key that created the task before the migration. Ensure +the roles of this user or API key have been updated with the required +`remote_indices` privileges. For users, tasks capture the caller's credentials +when started and run in that user's security context. For API keys, restarting a +task will update the task with the updated API key. + +* Use the <> API to start any transforms. +* Use the <> API to open any anomaly detection jobs. +* Use the <> API to resume any auto-follow {ccr}. +* Use the <> API to resume any manual {ccr} or +existing indices that were created from the auto-follow pattern. + +[[remote-clusters-migration-disable-cert]] +==== Disable certificate based authentication and authorization + +NOTE: Only proceed with this step if the migration has been proved successful on +the local cluster. If the migration is unsuccessful, either +<> or <>. + +Next, disable the certification based connection. Optionally, you can also +revoke the authorization. + +. There is no particular setting to enable or disable a certificate based cross +cluster connection, because it shares the same transport protocol with the +intra-cluster node-to-node communication. ++ +One way a remote cluster administrator can stop an existing local cluster from +connecting, is by changing TLS trust. The exact steps vary, depending on how the +clusters have been configured. 
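Returning to the resume step above, a concrete sketch of restarting each kind of task once the migration has been verified; every task and index name here is hypothetical:

[source,console]
----
// restart a transform
POST /_transform/my-transform/_start

// reopen an anomaly detection job
POST /_ml/anomaly_detectors/my-job/_open

// resume an auto-follow pattern
POST /_ccr/auto_follow/my-pattern/resume

// resume a manually created follower index
POST /my-follower-index/_ccr/resume_follow
----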
A generic solution is to +<> so that any existing certificate/key, locally +or distributed, is no longer trusted. ++ +Another solution is to apply IP filters to the transport interface, blocking +traffic from outside the cluster. + +. Optionally, delete any roles on the remote cluster that were only used for +cross-cluster operations. These roles are no longer used under the API key based +security model. + +[[remote-clusters-migration-rollback]] +==== Rollback + +If you need to roll back, follow these steps on the local cluster: + +. Stop any persistent tasks that refer to the remote cluster. + +. Remove the remote cluster definition by setting the remote cluster settings to +`null`. + +. Remove the `remote_indices` privileges from any roles that were updated during +the migration. + +. On each node, remove the `remote_cluster_client.ssl.*` settings from +`elasticsearch.yml`. + +. Restart the local cluster to apply changes to the keystore and +`elasticsearch.yml`. + +. On the local cluster, apply the original remote cluster settings. If the +remote cluster connection has been configured statically (using the +`elasticsearch.yml` file), restart the cluster. + +. Use the <> to verify that the +local cluster has connected to the remote cluster. The response should have +`"connected": true` and not have `"cluster_credentials": "::es_redacted::"`. + +. Restart any persistent tasks that you've stopped earlier. \ No newline at end of file diff --git a/docs/reference/modules/cluster/remote-clusters-remote-info.asciidoc b/docs/reference/modules/cluster/remote-clusters-remote-info.asciidoc new file mode 100644 index 0000000000000..c1ab215b66413 --- /dev/null +++ b/docs/reference/modules/cluster/remote-clusters-remote-info.asciidoc @@ -0,0 +1,32 @@ +[source,console] +---- +GET /_remote/info +---- +// TEST[skip:TODO] + +The API should return `"connected" : true`. When using +<>, it should also return +`"cluster_credentials": "::es_redacted::"`. + +[source,console-result,subs=attributes+] +---- +{ + "cluster_one" : { + "seeds" : [ + "127.0.0.1:9443" + ], + "connected" : true, <1> + "num_nodes_connected" : 1, + "max_connections_per_cluster" : 3, + "initial_connect_timeout" : "30s", + "skip_unavailable" : false, + "cluster_credentials": "::es_redacted::", <2> + "mode" : "sniff" + } +} +---- +// TEST[skip:TODO] +<1> The remote cluster has connected successfully. +<2> If present, indicates the remote cluster has connected using +<> instead of +<>. \ No newline at end of file diff --git a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc index ff16666d52172..85e63918e6fa9 100644 --- a/docs/reference/modules/cluster/remote-clusters-settings.asciidoc +++ b/docs/reference/modules/cluster/remote-clusters-settings.asciidoc @@ -65,12 +65,10 @@ mode are described separately. is used as the fallback setting. -// TODO: fix the link to new page of API key based remote clusters - `cluster.remote..credentials` (<>):: beta:[] - Per cluster setting for configuring remote clusters with the API Key based model. + Per cluster setting for configuring <>. This setting takes the encoded value of a <> and must be set in the <> on each node in the cluster. 
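Since the `cluster.remote.<cluster_alias>.credentials` setting above takes the encoded value of a cross-cluster API key, here is a sketch of minting one on the remote cluster; the key name and index pattern are placeholders:

[source,console]
----
POST /_security/cross_cluster/api_key
{
  "name": "my-remote-access-key",
  "access": {
    "search": [
      { "names": ["logs-*"] }
    ]
  }
}
----

The `encoded` value in the response is what goes into the keystore entry on each node of the local cluster.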
diff --git a/docs/reference/modules/cluster/remote-clusters-troubleshooting.asciidoc b/docs/reference/modules/cluster/remote-clusters-troubleshooting.asciidoc new file mode 100644 index 0000000000000..5dc6ab8c08c88 --- /dev/null +++ b/docs/reference/modules/cluster/remote-clusters-troubleshooting.asciidoc @@ -0,0 +1,394 @@ +[[remote-clusters-troubleshooting]] +=== Troubleshooting remote clusters + +++++ +Troubleshooting +++++ + +You may encounter several issues when setting up a remote cluster for {ccr} or +{ccs}. + +[[remote-clusters-troubleshooting-general]] +==== General troubleshooting + +[[remote-clusters-troubleshooting-check-connection]] +===== Checking whether a remote cluster has connected successfully + +A successful call to the cluster settings update API for adding or updating +remote clusters does not necessarily mean the configuration works. +Use the <> to verify that a local +cluster is successfully connected to a remote cluster. + +include::remote-clusters-remote-info.asciidoc[] + +[[remote-clusters-troubleshooting-enable-server]] +===== Enabling the remote cluster server + +When using API key authentication, cross-cluster traffic happens on the remote +cluster interface, instead of the transport interface. The remote cluster +interface is not enabled by default. This means a node is not ready to accept +incoming cross-cluster requests by default, while it is ready to send outgoing +cross-cluster requests. Ensure you've enabled the remote cluster server on every +node of the remote cluster. In `elasticsearch.yml`: + +* Set <> to +`true`. +* Configure the bind and publish address for remote cluster server traffic, for +example using <>. Without +configuring the address, remote cluster traffic may be bound to the local +interface, and remote clusters running on other machines can't connect. +* Optionally, configure the remote cluster server port using +<> (defaults to `9443`). + +[[remote-clusters-troubleshooting-common-issues]] +==== Common issues + +The following issues are listed in the order they may occur while setting up a +remote cluster. + +[[remote-clusters-not-reachable]] +===== Remote cluster not reachable + +====== Symptom + +A local cluster may not be able to reach a remote cluster for many reasons. For +example, the remote cluster server may not be enabled, an incorrect host or port +may be configured, or a firewall may be blocking traffic. When a remote cluster +is not reachable, check the logs of the local cluster for a `connect_exception`. + +When the remote cluster is configured using proxy mode: +[source,txt,subs=+quotes] +---- +[2023-06-28T16:36:47,264][WARN ][o.e.t.ProxyConnectionStrategy] [local-node] failed to open any proxy connections to cluster [my] +org.elasticsearch.transport.ConnectTransportException: [][192.168.0.42:9443] *connect_exception* +---- + +When the remote cluster is configured using sniff mode: +[source,txt,subs=+quotes] +---- +[2023-06-28T16:38:37,731][WARN ][o.e.t.SniffConnectionStrategy] [local-node] fetching nodes from external cluster [my] failed +org.elasticsearch.transport.ConnectTransportException: [][192.168.0.42:9443] *connect_exception* +---- + +====== Resolution + +* Check that the host and port for the remote cluster are correct. +* Ensure the <> on the remote cluster. +* Ensure no firewall is blocking the communication. + +[[remote-clusters-troubleshooting-tls-trust]] +===== TLS trust not established + +TLS can be misconfigured on the local or the remote cluster. 
The result is that +the local cluster does not trust the certificate presented by the remote +cluster. + +====== Symptom + +The local cluster logs `failed to establish trust with server`: + +[source,txt,subs=+quotes] +---- +[2023-06-29T09:40:55,465][WARN ][o.e.c.s.DiagnosticTrustManager] [local-node] *failed to establish trust with server* at [192.168.0.42]; the server provided a certificate with subject name [CN=remote_cluster], fingerprint [529de35e15666ffaa26afa50876a2a48119db03a], no keyUsage and no extendedKeyUsage; the certificate is valid between [2023-01-29T12:08:37Z] and [2032-08-29T12:08:37Z] (current time is [2023-08-16T23:40:55.464275Z], certificate dates are valid); the session uses cipher suite [TLS_AES_256_GCM_SHA384] and protocol [TLSv1.3]; the certificate has subject alternative names [DNS:localhost,DNS:localhost6.localdomain6,IP:127.0.0.1,IP:0:0:0:0:0:0:0:1,DNS:localhost4,DNS:localhost6,DNS:localhost.localdomain,DNS:localhost4.localdomain4,IP:192.168.0.42]; the certificate is issued by [CN=Elastic Auto RemoteCluster CA] but the server did not provide a copy of the issuing certificate in the certificate chain; this ssl context ([(shared) (with trust configuration: JDK-trusted-certs)]) is not configured to trust that issuer but trusts [97] other issuers +sun.security.validator.ValidatorException: PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target +---- + +The remote cluster logs `client did not trust this server's certificate`: + +[source,txt,subs=+quotes] +---- +[2023-06-29T09:40:55,478][WARN ][o.e.x.c.s.t.n.SecurityNetty4Transport] [remote-node] *client did not trust this server's certificate*, closing connection Netty4TcpChannel{localAddress=/192.168.0.42:9443, remoteAddress=/192.168.0.84:57305, profile=_remote_cluster} +---- + +====== Resolution + +Read the warn log message on the local cluster carefully to determine the exact +cause of the failure. For example: + +* Is the remote cluster certificate not signed by a trusted CA? This is the most +likely cause. +* Is hostname verification failing? +* Is the certificate expired? + +Once you know the cause, you should be able to fix it by adjusting the remote +cluster related SSL settings on either the local cluster or the remote cluster. + +Often, the issue is on the local cluster. For example, fix it by configuring necessary +trusted CAs (`xpack.security.remote_cluster_client.ssl.certificate_authorities`). + +If you change the `elasticsearch.yml` file, the associated cluster needs to be +restarted for the changes to take effect. + +[[remote-clusters-troubleshooting-api-key]] +==== API key authentication issues + +[[remote-clusters-troubleshooting-transport-port-api-key]] +===== Connecting to transport port when using API key authentication + +When using API key authentication, a local cluster should connect to a remote +cluster's remote cluster server port (defaults to `9443`) instead of the +transport port (defaults to `9300`). A misconfiguration can lead to a number of +symptoms: + +====== Symptom 1 + +It's recommended to use different CAs and certificates for the transport +interface and the remote cluster server interface. If this recommendation is +followed, a remote cluster client node does not trust the server certificate +presented by a remote cluster on the transport interface. 
+ +The local cluster logs `failed to establish trust with server`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T12:48:46,575][WARN ][o.e.c.s.DiagnosticTrustManager] [local-node] *failed to establish trust with server* at [192.168.0.42]; the server provided a certificate with subject name [CN=transport], fingerprint [c43e628be2a8aaaa4092b82d78f2bc206c492322], no keyUsage and no extendedKeyUsage; the certificate is valid between [2023-01-29T12:05:53Z] and [2032-08-29T12:05:53Z] (current time is [2023-06-28T02:48:46.574738Z], certificate dates are valid); the session uses cipher suite [TLS_AES_256_GCM_SHA384] and protocol [TLSv1.3]; the certificate has subject alternative names [DNS:localhost,DNS:localhost6.localdomain6,IP:127.0.0.1,IP:0:0:0:0:0:0:0:1,DNS:localhost4,DNS:localhost6,DNS:localhost.localdomain,DNS:localhost4.localdomain4,IP:192.168.0.42]; the certificate is issued by [CN=Elastic Auto Transport CA] but the server did not provide a copy of the issuing certificate in the certificate chain; this ssl context ([xpack.security.remote_cluster_client.ssl (with trust configuration: PEM-trust{/rcs2/ssl/remote-cluster-ca.crt})]) is not configured to trust that issuer, it only trusts the issuer [CN=Elastic Auto RemoteCluster CA] with fingerprint [ba2350661f66e46c746c1629f0c4b645a2587ff4] +sun.security.validator.ValidatorException: PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target +---- + +The remote cluster logs `client did not trust this server's certificate`: +[source,txt,subs=+quotes] +---- +[2023-06-28T12:48:46,584][WARN ][o.e.x.c.s.t.n.SecurityNetty4Transport] [remote-node] *client did not trust this server's certificate*, closing connection Netty4TcpChannel{localAddress=/192.168.0.42:9309, remoteAddress=/192.168.0.84:60810, profile=default} +---- + +====== Symptom 2 + +The CA and certificate can be shared between the transport and remote cluster +server interfaces. Since a remote cluster client does not have a client +certificate by default, the server will fail to verify the client certificate. + +The local cluster logs `Received fatal alert: bad_certificate`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T12:43:30,705][WARN ][o.e.t.TcpTransport ] [local-node] exception caught on transport layer [Netty4TcpChannel{localAddress=/192.168.0.84:60738, remoteAddress=/192.168.0.42:9309, profile=_remote_cluster}], closing connection +io.netty.handler.codec.DecoderException: javax.net.ssl.SSLHandshakeException: *Received fatal alert: bad_certificate* +---- + +The remote cluster logs `Empty client certificate chain`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T12:43:30,772][WARN ][o.e.t.TcpTransport ] [remote-node] exception caught on transport layer [Netty4TcpChannel{localAddress=/192.168.0.42:9309, remoteAddress=/192.168.0.84:60783, profile=default}], closing connection +io.netty.handler.codec.DecoderException: javax.net.ssl.SSLHandshakeException: *Empty client certificate chain* +---- + +====== Symptom 3 + +If the remote cluster client is configured for mTLS and provides a valid client +certificate, the connection fails because the client does not send the expected +authentication header. 
+ +The local cluster logs `missing authentication`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T13:04:52,710][WARN ][o.e.t.ProxyConnectionStrategy] [local-node] failed to open any proxy connections to cluster [my] +org.elasticsearch.transport.RemoteTransportException: [remote-node][192.168.0.42:9309][cluster:internal/remote_cluster/handshake] +Caused by: org.elasticsearch.ElasticsearchSecurityException: *missing authentication* credentials for action [cluster:internal/remote_cluster/handshake] +---- + +This does not show up in the logs of the remote cluster. + +====== Symptom 4 + +If anonymous access is enabled on the remote cluster and it does not require +authentication, the local cluster may log one of the following, depending on the +privileges of the anonymous user. + +If the anonymous user does not have the necessary privileges to make a +connection, the local cluster logs `unauthorized`: + +[source,txt,subs=+quotes] +---- +org.elasticsearch.transport.RemoteTransportException: [remote-node][192.168.0.42:9309][cluster:internal/remote_cluster/handshake] +Caused by: org.elasticsearch.ElasticsearchSecurityException: action [cluster:internal/remote_cluster/handshake] is *unauthorized* for user [anonymous_foo] with effective roles [reporting_user], this action is granted by the cluster privileges [cross_cluster_search,cross_cluster_replication,manage,all] +---- + +If the anonymous user has the necessary privileges, for example it is a superuser, +the local cluster logs `requires channel profile to be [_remote_cluster], +but got [default]`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T13:09:52,031][WARN ][o.e.t.ProxyConnectionStrategy] [local-node] failed to open any proxy connections to cluster [my] +org.elasticsearch.transport.RemoteTransportException: [remote-node][192.168.0.42:9309][cluster:internal/remote_cluster/handshake] +Caused by: java.lang.IllegalArgumentException: remote cluster handshake action *requires channel profile to be [_remote_cluster], but got [default]* +---- + +====== Resolution + +Check the port number and ensure you are indeed connecting to the remote cluster +server instead of the transport interface. + +[[remote-clusters-troubleshooting-no-api-key]] +===== Connecting without a cross-cluster API key + +A local cluster uses the presence of a cross-cluster API key to determine the +model with which it connects to a remote cluster. If a cross-cluster API key is +present, it uses API key based authentication. Otherwise, it uses certificate +based authentication. You can check what model is being used with the <> on the local cluster: + +include::remote-clusters-remote-info.asciidoc[] + +Besides checking the response of the remote cluster info API, you can also check +the logs. + +====== Symptom 1 + +If no cross-cluster API key is used, the local cluster uses the certificate +based authentication method, and connects to the remote cluster using the TLS +configuration of the transport interface. If the remote cluster uses different +TLS CAs and certificates for the transport and remote cluster server interfaces (which +is the recommendation), TLS verification will fail. 
+ +The local cluster logs `failed to establish trust with server`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T12:51:06,452][WARN ][o.e.c.s.DiagnosticTrustManager] [local-node] *failed to establish trust with server* at []; the server provided a certificate with subject name [CN=remote_cluster], fingerprint [529de35e15666ffaa26afa50876a2a48119db03a], no keyUsage and no extendedKeyUsage; the certificate is valid between [2023-01-29T12:08:37Z] and [2032-08-29T12:08:37Z] (current time is [2023-06-28T02:51:06.451581Z], certificate dates are valid); the session uses cipher suite [TLS_AES_256_GCM_SHA384] and protocol [TLSv1.3]; the certificate has subject alternative names [DNS:localhost,DNS:localhost6.localdomain6,IP:127.0.0.1,IP:0:0:0:0:0:0:0:1,DNS:localhost4,DNS:localhost6,DNS:localhost.localdomain,DNS:localhost4.localdomain4,IP:192.168.0.42]; the certificate is issued by [CN=Elastic Auto RemoteCluster CA] but the server did not provide a copy of the issuing certificate in the certificate chain; this ssl context ([xpack.security.transport.ssl (with trust configuration: PEM-trust{/rcs2/ssl/transport-ca.crt})]) is not configured to trust that issuer, it only trusts the issuer [CN=Elastic Auto Transport CA] with fingerprint [bbe49e3f986506008a70ab651b188c70df104812] +sun.security.validator.ValidatorException: PKIX path building failed: sun.security.provider.certpath.SunCertPathBuilderException: unable to find valid certification path to requested target +---- + +The remote cluster logs `client did not trust this server's certificate`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T12:52:16,914][WARN ][o.e.x.c.s.t.n.SecurityNetty4Transport] [remote-node] *client did not trust this server's certificate*, closing connection Netty4TcpChannel{localAddress=/192.168.0.42:9443, remoteAddress=/192.168.0.84:60981, profile=_remote_cluster} +---- + +====== Symptom 2 + +Even if TLS verification is not an issue, the connection fails due to missing +credentials. + +The local cluster logs `Please ensure you have configured remote cluster credentials`: + +[source,txt,subs=+quotes] +---- +Caused by: java.lang.IllegalArgumentException: Cross cluster requests through the dedicated remote cluster server port require transport header [_cross_cluster_access_credentials] but none found. *Please ensure you have configured remote cluster credentials* on the cluster originating the request. +---- + +This does not show up in the logs of the remote cluster. + +====== Resolution + +Add the cross-cluster API key to {es} keystore on every node of the local +cluster. Restart the local cluster to reload the keystore. + +[[remote-clusters-troubleshooting-wrong-api-key-type]] +===== Using the wrong API key type + +API key based authentication requires +<>. It does +not work with <>. + +====== Symptom + +The local cluster logs `authentication expected API key type of [cross_cluster]`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T13:26:53,962][WARN ][o.e.t.ProxyConnectionStrategy] [local-node] failed to open any proxy connections to cluster [my] +org.elasticsearch.transport.RemoteTransportException: [remote-node][192.168.0.42:9443][cluster:internal/remote_cluster/handshake] +Caused by: org.elasticsearch.ElasticsearchSecurityException: *authentication expected API key type of [cross_cluster]*, but API key [agZXJocBmA2beJfq2yKu] has type [rest] +---- + +This does not show up in the logs of the remote cluster. + +====== Resolution + +Ask the remote cluster administrator to create and distribute a +<>. 
Replace the +existing API key in the {es} keystore with this cross-cluster API key on every +node of the local cluster. Restart the local cluster for keystore changes to +take effect. + +[[remote-clusters-troubleshooting-non-valid-api-key]] +===== Invalid API key + +A cross-cluster API key can fail to authenticate, for example when its credentials +are incorrect, or when it has been invalidated or has expired. + +====== Symptom + +The local cluster logs `unable to authenticate`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T13:22:58,264][WARN ][o.e.t.ProxyConnectionStrategy] [local-node] failed to open any proxy connections to cluster [my] +org.elasticsearch.transport.RemoteTransportException: [remote-node][192.168.0.42:9443][cluster:internal/remote_cluster/handshake] +Caused by: org.elasticsearch.ElasticsearchSecurityException: *unable to authenticate* user [agZXJocBmA2beJfq2yKu] for action [cluster:internal/remote_cluster/handshake] +---- + +The remote cluster logs `Authentication using apikey failed`: + +[source,txt,subs=+quotes] +---- +[2023-06-28T13:24:38,744][WARN ][o.e.x.s.a.ApiKeyAuthenticator] [remote-node] *Authentication using apikey failed* - invalid credentials for API key [agZXJocBmA2beJfq2yKu] +---- + +====== Resolution + +Ask the remote cluster administrator to create and distribute a +<>. Replace the +existing API key in the {es} keystore with this cross-cluster API key on every +node of the local cluster. Restart the local cluster for keystore changes to +take effect. + +[[remote-clusters-troubleshooting-insufficient-privileges]] +===== API key or local user has insufficient privileges + +The effective permission for a local user running requests on a remote cluster +is determined by the intersection of the cross-cluster API key's privileges and +the local user's `remote_indices` privileges. + +====== Symptom + +Request failures due to insufficient privileges result in API responses like: + +[source,js,subs=+quotes] +---- +{ + "type": "security_exception", + "reason": "action [indices:data/read/search] towards remote cluster is *unauthorized for user* [foo] with assigned roles [foo-role] authenticated by API key id [agZXJocBmA2beJfq2yKu] of user [elastic-admin] on indices [cd], this action is granted by the index privileges [read,all]" +} +---- +// NOTCONSOLE + +This does not show up in any logs. + +====== Resolution + +. Check that the local user has the necessary `remote_indices` privileges, and grant them if necessary (see the role sketch below). +. If permission is not an issue locally, ask the remote cluster administrator to +create and distribute a +<>. Replace the +existing API key in the {es} keystore with this cross-cluster API key on every +node of the local cluster. Restart the local cluster for keystore changes to +take effect. + +[[remote-clusters-troubleshooting-no-remote_indices-privileges]] +===== Local user has no `remote_indices` privileges + +This is a special case of insufficient privileges. In this case, the local user +has no `remote_indices` privileges at all for the target remote cluster. {es} +can detect that and issue a more explicit error response. 
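As referenced in the resolution above, a minimal sketch of a role that grants `remote_indices` privileges might look like the following. The role name (`foo-role`), cluster alias (`my`), and index name (`cd`) are hypothetical, echoing the example error messages in this section:

[source,console]
----
PUT /_security/role/foo-role
{
  "remote_indices": [
    {
      "clusters": [ "my" ],
      "names": [ "cd" ],
      "privileges": [ "read" ]
    }
  ]
}
----
// TEST[skip:hypothetical example]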
+ +====== Symptom + +This results in API responses like: + +[source,js,subs=+quotes] +---- +{ + "type": "security_exception", + "reason": "action [indices:data/read/search] towards remote cluster [my] is unauthorized for user [foo] with effective roles [] (assigned roles [foo-role] were not found) because *no remote indices privileges apply for the target cluster*" +} +---- +// NOTCONSOLE + +====== Resolution + +Grant sufficient `remote_indices` privileges to the local user. diff --git a/docs/reference/modules/discovery/fault-detection.asciidoc b/docs/reference/modules/discovery/fault-detection.asciidoc index 001763430cf4b..dfa49e5b0d9af 100644 --- a/docs/reference/modules/discovery/fault-detection.asciidoc +++ b/docs/reference/modules/discovery/fault-detection.asciidoc @@ -35,7 +35,7 @@ starting from the beginning of the cluster state update. Refer to [[cluster-fault-detection-troubleshooting]] ==== Troubleshooting an unstable cluster - +//tag::troubleshooting[] Normally, a node will only leave a cluster if deliberately shut down. If a node leaves the cluster unexpectedly, it's important to address the cause. A cluster in which nodes leave unexpectedly is unstable and can create several issues. @@ -143,6 +143,7 @@ removes the node removed after three consecutively failed health checks. Refer to <> for information about the settings which control this mechanism. +[discrete] ===== Diagnosing `disconnected` nodes Nodes typically leave the cluster with reason `disconnected` when they shut @@ -181,6 +182,7 @@ In extreme cases, you may need to take packet captures using `tcpdump` to determine whether messages between nodes are being dropped or rejected by some other device on the network. +[discrete] ===== Diagnosing `lagging` nodes {es} needs every node to process cluster state updates reasonably quickly. If a @@ -225,6 +227,7 @@ To reconstruct the output, base64-decode the data and decompress it using cat lagdetector.log | sed -e 's/.*://' | base64 --decode | gzip --decompress ---- +[discrete] ===== Diagnosing `follower check retry count exceeded` nodes Nodes sometimes leave the cluster with reason `follower check retry count @@ -260,6 +263,7 @@ By default the follower checks will time out after 30s, so if node departures are unpredictable then capture stack dumps every 15s to be sure that at least one stack dump was taken at the right time. 
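For example, a rough shell sketch of such a capture loop (a sketch only; the process lookup is an assumption and may need adjusting for your environment):

[source,sh]
----
# Sketch: dump the Elasticsearch JVM's threads every 15s.
# Assumes a single Elasticsearch process on this host and that
# jstack (for example, from the bundled JDK) is on the PATH.
ES_PID=$(pgrep -f org.elasticsearch | head -n 1)
while true; do
  jstack "$ES_PID" > "stackdump-$(date +%s).txt"
  sleep 15
done
----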
+[discrete] ===== Diagnosing `ShardLockObtainFailedException` failures If a node leaves and rejoins the cluster then {es} will usually shut down and @@ -295,3 +299,4 @@ To reconstruct the output, base64-decode the data and decompress it using ---- cat shardlock.log | sed -e 's/.*://' | base64 --decode | gzip --decompress ---- +//end::troubleshooting[] \ No newline at end of file diff --git a/docs/reference/modules/remote-clusters.asciidoc b/docs/reference/modules/remote-clusters.asciidoc index 472ff8f4d07e4..8a0feefeaf21f 100644 --- a/docs/reference/modules/remote-clusters.asciidoc +++ b/docs/reference/modules/remote-clusters.asciidoc @@ -94,4 +94,8 @@ include::cluster/remote-clusters-api-key.asciidoc[] include::cluster/remote-clusters-cert.asciidoc[] +include::cluster/remote-clusters-migration.asciidoc[] + include::cluster/remote-clusters-settings.asciidoc[] + +include::cluster/remote-clusters-troubleshooting.asciidoc[] diff --git a/docs/reference/modules/threadpool.asciidoc b/docs/reference/modules/threadpool.asciidoc index 4ca0b1339ac40..ed4becbfbb6d0 100644 --- a/docs/reference/modules/threadpool.asciidoc +++ b/docs/reference/modules/threadpool.asciidoc @@ -42,7 +42,7 @@ There are several thread pools, but the important ones include: size of `16`. `write`:: - For single-document index/delete/update and bulk requests. Thread pool type + For single-document index/delete/update, ingest processors, and bulk requests. Thread pool type is `fixed` with a size of <>, queue_size of `10000`. The maximum size for this pool is `pass:[1 + ]`<>. diff --git a/docs/reference/query-dsl/pinned-query.asciidoc b/docs/reference/query-dsl/pinned-query.asciidoc index f8821d8e87463..dd437f9d5d106 100644 --- a/docs/reference/query-dsl/pinned-query.asciidoc +++ b/docs/reference/query-dsl/pinned-query.asciidoc @@ -42,7 +42,7 @@ You can specify the following attributes for each document: (Required, string) The unique <>. `_index`:: -(Required, string) The index that contains the document. +(Optional, string) The index that contains the document. -- `organic`:: Any choice of query used to rank documents which will be ranked below the "pinned" documents. @@ -59,16 +59,11 @@ GET /_search "pinned": { "docs": [ { - "_index": "my-index-000001", + "_index": "my-index-000001", <1> "_id": "1" }, { - "_index": "my-index-000001", - "_id": "4" - }, - { - "_index": "my-index-000002", - "_id": "100" + "_id": "4" <2> } ], "organic": { @@ -80,3 +75,6 @@ GET /_search } } -------------------------------------------------- + +<1> The document with id `1` from `my-index-000001` will be the first result. +<2> When `_index` is missing, all documents with id `4` from the queried indices will be pinned with the same score. diff --git a/docs/reference/release-notes.asciidoc b/docs/reference/release-notes.asciidoc index f8a602aeeea63..df8c167a1ae88 100644 --- a/docs/reference/release-notes.asciidoc +++ b/docs/reference/release-notes.asciidoc @@ -8,6 +8,7 @@ This section summarizes the changes in each release. * <> * <> +* <> * <> * <> * <> @@ -50,6 +51,7 @@ This section summarizes the changes in each release. 
include::release-notes/8.11.0.asciidoc[] include::release-notes/8.10.0.asciidoc[] +include::release-notes/8.9.2.asciidoc[] include::release-notes/8.9.1.asciidoc[] include::release-notes/8.9.0.asciidoc[] include::release-notes/8.8.2.asciidoc[] diff --git a/docs/reference/release-notes/8.9.2.asciidoc b/docs/reference/release-notes/8.9.2.asciidoc new file mode 100644 index 0000000000000..d4244eab27645 --- /dev/null +++ b/docs/reference/release-notes/8.9.2.asciidoc @@ -0,0 +1,19 @@ +[[release-notes-8.9.2]] +== {es} version 8.9.2 + +Also see <>. + +[[bug-8.9.2]] +[float] +=== Bug fixes + +Data streams:: +* Avoid lifecycle NPE in the data stream lifecycle usage API {es-pull}98260[#98260] + +Geo:: +* Fix mvt error when returning partial results {es-pull}98765[#98765] (issue: {es-issue}98730[#98730]) + +Ingest Node:: +* Revert "Add mappings for enrich fields" {es-pull}98683[#98683] + + diff --git a/docs/reference/search-application/apis/get-search-application.asciidoc b/docs/reference/search-application/apis/get-search-application.asciidoc index 5beafd96c2e6b..53e6df0262db8 100644 --- a/docs/reference/search-application/apis/get-search-application.asciidoc +++ b/docs/reference/search-application/apis/get-search-application.asciidoc @@ -95,6 +95,7 @@ A sample response: ---- { "name": "my-app", + "indices": [ "index1", "index2" ], "updated_at_millis": 1682105622204, "template": { "script": { diff --git a/docs/reference/search/search-your-data/search-across-clusters.asciidoc b/docs/reference/search/search-your-data/search-across-clusters.asciidoc index 69faad95343b9..39c272cfc6b78 100644 --- a/docs/reference/search/search-your-data/search-across-clusters.asciidoc +++ b/docs/reference/search/search-your-data/search-across-clusters.asciidoc @@ -76,17 +76,19 @@ PUT _cluster/settings "remote": { "cluster_one": { "seeds": [ - "127.0.0.1:9300" - ] + "35.238.149.1:9300" + ], + "skip_unavailable": true }, "cluster_two": { "seeds": [ - "127.0.0.1:9301" - ] + "35.238.149.2:9300" + ], + "skip_unavailable": false }, - "cluster_three": { + "cluster_three": { <1> "seeds": [ - "127.0.0.1:9302" + "35.238.149.3:9300" ] } } @@ -95,7 +97,12 @@ PUT _cluster/settings } -------------------------------- // TEST[setup:host] -// TEST[s/127.0.0.1:930\d+/\${transport_host}/] +// TEST[s/35.238.149.\d+:930\d+/\${transport_host}/] + +<1> Since `skip_unavailable` was not set on `cluster_three`, it uses +the default of `false`. See the <> +section for details. + [discrete] [[ccs-search-remote-cluster]] @@ -111,6 +118,7 @@ The following <> API request searches the -------------------------------------------------- GET /cluster_one:my-index-000001/_search { + "size": 1, "query": { "match": { "user.id": "kimchy" @@ -122,7 +130,9 @@ GET /cluster_one:my-index-000001/_search // TEST[continued] // TEST[setup:my_index] -The API returns the following response: +The API returns the following response. Note that when you +search one or more remote clusters, a `_clusters` section is +included to provide information about the search on each cluster. 
[source,console-result] -------------------------------------------------- @@ -130,8 +140,8 @@ The API returns the following response: "took": 150, "timed_out": false, "_shards": { - "total": 3, - "successful": 3, + "total": 12, + "successful": 12, "failed": 0, "skipped": 0 }, @@ -141,13 +151,13 @@ The API returns the following response: "skipped": 0, "details": { "cluster_one": { <1> - "status": "successful", - "indices": "my-index-000001", - "took": 148, + "status": "successful", <2> + "indices": "my-index-000001", <3> + "took": 148, <4> "timed_out": false, - "_shards": { - "total": 3, - "successful": 3, + "_shards": { <5> + "total": 12, + "successful": 12, "skipped": 0, "failed": 0 } @@ -162,7 +172,7 @@ The API returns the following response: "max_score": 1, "hits": [ { - "_index": "cluster_one:my-index-000001", <2> + "_index": "cluster_one:my-index-000001", <6> "_id": "0", "_score": 1, "_source": { @@ -185,14 +195,22 @@ The API returns the following response: // TESTRESPONSE[s/"took": 150/"took": "$body.took"/] // TESTRESPONSE[s/"max_score": 1/"max_score": "$body.hits.max_score"/] // TESTRESPONSE[s/"_score": 1/"_score": "$body.hits.hits.0._score"/] -// TESTRESPONSE[s/"total": 3/"total": "$body._shards.total"/] -// TESTRESPONSE[s/"successful": 3/"successful": "$body._shards.successful"/] +// TESTRESPONSE[s/"total": 12/"total": "$body._shards.total"/] +// TESTRESPONSE[s/"successful": 12/"successful": "$body._shards.successful"/] // TESTRESPONSE[s/"skipped": 0/"skipped": "$body._shards.skipped"/] -// TESTRESPONSE[s/"failed": 3/"failed": "$body._shards.failed"/] // TESTRESPONSE[s/"took": 148/"took": "$body._clusters.details.cluster_one.took"/] -<1> The details section shows information about the search on each cluster. -<2> The search response body includes the name of the remote cluster in the +<1> The `_clusters/details` section shows metadata about the search on each cluster. +<2> The cluster status can be one of: *running*, *successful* (searches on all shards +were successful), *partial* (searches on at least one shard of the cluster was successful +and at least one failed), *skipped* (the search failed on a cluster marked with +`skip_unavailable`=`true`) or *failed* (the search failed on a cluster marked with +`skip_unavailable`=`false`). +<3> The index expression supplied by the user. If you provide a wildcard such as `logs-*`, +this section will show the value with the wildcard, not the concrete indices being searched. +<4> How long (in milliseconds) the sub-search took on that cluster. +<5> The shard details for the sub-search on that cluster. +<6> The search response body includes the name of the remote cluster in the `_index` parameter. @@ -204,8 +222,9 @@ The API returns the following response: The following <> API request searches the `my-index-000001` index on three clusters: -* Your local cluster -* Two remote clusters, `cluster_one` and `cluster_two` +* The local ("querying") cluster, with 10 shards +* Two remote clusters, `cluster_one`, with 12 shards and `cluster_two` +with 6 shards. 
[source,console] -------------------------------------------------- @@ -230,8 +249,8 @@ The API returns the following response: "timed_out": false, "num_reduce_phases": 4, "_shards": { - "total": 12, - "successful": 12, + "total": 28, + "successful": 28, "failed": 0, "skipped": 0 }, @@ -246,8 +265,8 @@ The API returns the following response: "took": 21, "timed_out": false, "_shards": { - "total": 5, - "successful": 5, + "total": 10, + "successful": 10, "skipped": 0, "failed": 0 } @@ -258,8 +277,8 @@ The API returns the following response: "took": 48, "timed_out": false, "_shards": { - "total": 4, - "successful": 4, + "total": 12, + "successful": 12, "skipped": 0, "failed": 0 } @@ -270,8 +289,8 @@ The API returns the following response: "took": 141, "timed_out": false, "_shards": { - "total" : 3, - "successful" : 3, + "total" : 6, + "successful" : 6, "skipped": 0, "failed": 0 } @@ -344,16 +363,16 @@ The API returns the following response: // TESTRESPONSE[s/"max_score": 1/"max_score": "$body.hits.max_score"/] // TESTRESPONSE[s/"_score": 1/"_score": "$body.hits.hits.0._score"/] // TESTRESPONSE[s/"_score": 2/"_score": "$body.hits.hits.1._score"/] -// TESTRESPONSE[s/"total": 12/"total": "$body._shards.total"/] -// TESTRESPONSE[s/"successful": 12/"successful": "$body._shards.successful"/] -// TESTRESPONSE[s/"total": 5/"total": "$body._clusters.details.(local)._shards.total"/] -// TESTRESPONSE[s/"successful": 5/"successful": "$body._clusters.details.(local)._shards.successful"/] +// TESTRESPONSE[s/"total": 28/"total": "$body._shards.total"/] +// TESTRESPONSE[s/"successful": 28/"successful": "$body._shards.successful"/] +// TESTRESPONSE[s/"total": 10/"total": "$body._clusters.details.(local)._shards.total"/] +// TESTRESPONSE[s/"successful": 10/"successful": "$body._clusters.details.(local)._shards.successful"/] // TESTRESPONSE[s/"took": 21/"took": "$body._clusters.details.(local).took"/] -// TESTRESPONSE[s/"total": 4/"total": "$body._clusters.details.cluster_one._shards.total"/] -// TESTRESPONSE[s/"successful": 4/"successful": "$body._clusters.details.cluster_one._shards.successful"/] +// TESTRESPONSE[s/"total": 12/"total": "$body._clusters.details.cluster_one._shards.total"/] +// TESTRESPONSE[s/"successful": 12/"successful": "$body._clusters.details.cluster_one._shards.successful"/] // TESTRESPONSE[s/"took": 48/"took": "$body._clusters.details.cluster_one.took"/] -// TESTRESPONSE[s/"total" : 3/"total": "$body._clusters.details.cluster_two._shards.total"/] -// TESTRESPONSE[s/"successful" : 3/"successful": "$body._clusters.details.cluster_two._shards.successful"/] +// TESTRESPONSE[s/"total" : 6/"total": "$body._clusters.details.cluster_two._shards.total"/] +// TESTRESPONSE[s/"successful" : 6/"successful": "$body._clusters.details.cluster_two._shards.successful"/] // TESTRESPONSE[s/"took": 141/"took": "$body._clusters.details.cluster_two.took"/] <1> The local (querying) cluster is identified as "(local)". @@ -368,14 +387,12 @@ means the document came from the local cluster. === Using async search for {ccs} with ccs_minimize_roundtrips=true Remote clusters can be queried asynchronously using the <> API. -Async searches accept a <> parameter -that defaults to `false`. See <> to learn more about this option. +A {ccs} accepts a <> parameter. For +asynchronous searches it defaults to `false`. (Note: for synchronous searches it defaults to `true`.) +See <> to learn more about this option. 
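For example, the following sketch overrides the synchronous default by disabling roundtrip minimization on a regular search (the index and cluster alias reuse the earlier examples):

[source,console]
----
GET /cluster_one:my-index-000001/_search?ccs_minimize_roundtrips=false
{
  "query": {
    "match": {
      "user.id": "kimchy"
    }
  }
}
----
// TEST[skip:illustration only]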
The following request does an asynchronous search of the `my-index-000001` index using -`ccs_minimize_roundtrips=true` against three clusters: - -* The local cluster, with 8 shards -* Two remote clusters, `cluster_one` and `cluster_two`, with 10 shards each +`ccs_minimize_roundtrips=true` against three clusters (same ones as the previous example). [source,console] -------------------------------------------------- @@ -408,7 +425,7 @@ The API returns the following response: "timed_out": false, "num_reduce_phases": 0, "_shards": { - "total": 8, <2> + "total": 10, <2> "successful": 0, "failed": 0, "skipped": 0 }, "_clusters": { <3> "total" : 3, "successful" : 0, - "skipped": 0 + "skipped": 0, + "details": { + "(local)": { + "status": "running", + "indices": "my-index-000001", + "timed_out": false + }, + "cluster_one": { + "status": "running", + "indices": "my-index-000001", + "timed_out": false + }, + "cluster_two": { + "status": "running", + "indices": "my-index-000001", + "timed_out": false + } + } }, "hits": { "total" : { @@ -429,16 +463,17 @@ The API returns the following response: } } -------------------------------------------------- -// TEST[skip: terminated_early is absent from final results so is hard to reproduce here] +// TEST[skip: hard to reproduce initial state] <1> The async search id. <2> When `ccs_minimize_roundtrips` = `true` and searches on the remote clusters are still running, this section indicates the number of shards in scope for the local cluster only. This will be updated to include the total number of shards -across all clusters only when the search is completed. +across all clusters only when the search is completed. When +`ccs_minimize_roundtrips` = `false`, the total shard count is known up front and +will be correct. <3> The `_clusters` section indicates that 3 clusters are in scope for the search -and all are currently running (since `successful` and `skipped` both equal 0). - +and all are currently running. If you query the <> endpoint while the query is still running, you will see an update in the `_clusters` and `_shards` section of @@ -465,15 +500,39 @@ Response: "timed_out": false, "terminated_early": false, "_shards": { - "total": 8, - "successful": 8, <1> + "total": 10, + "successful": 10, <1> "skipped": 0, "failed": 0 }, "_clusters": { "total": 3, "successful": 1, <2> - "skipped": 0 + "skipped": 0, + "details": { + "(local)": { + "status": "successful", + "indices": "my-index-000001", + "took": 2034, + "timed_out": false, + "_shards": { + "total": 10, + "successful": 10, + "skipped": 0, + "failed": 0 + } + }, + "cluster_one": { + "status": "running", + "indices": "my-index-000001", + "timed_out": false + }, + "cluster_two": { + "status": "running", + "indices": "my-index-000001", + "timed_out": false + } + } }, "hits": { "total": { @@ -486,20 +545,21 @@ Response: } } -------------------------------------------------- -// TEST[skip: terminated_early is absent from final results so is hard to reproduce here] +// TEST[skip: hard to reproduce intermediate results] <1> All the local cluster shards have completed. <2> The local cluster search has completed, so the "successful" clusters entry -is set to 1. The `_clusters` response section will not be updated for the remote clusters -until all remote searches have finished (either successfully or been skipped). +is set to 1. The `_clusters` response metadata will be updated as each cluster +finishes. <3> Number of hits from the local cluster search. 
Final hits are not shown until searches on all clusters have been completed and merged. -After searches on all the clusters have completed, when you query the -<> endpoint, you will see the final -status of the `_clusters` and `_shards` section as well as the hits. +After searches on all the clusters have completed, querying the +<> endpoint will show the final +status of the `_clusters` and `_shards` section as well as the hits +and any aggregation results. [source,console] -------------------------------------------------- @@ -610,12 +670,341 @@ were searched across all clusters and that all were successful. <3> The `_clusters` section shows that searches on all 3 clusters were successful. +[discrete] +[[cross-cluster-search-failures]] +=== {ccs-cap} failures + +Failures during a {ccs} can result in one of two conditions: + +. partial results (2xx HTTP status code) +. a failed search (4xx or 5xx HTTP status code) + +Failure details will be present in the search response in both cases. + +A search will be failed if a cluster marked with `skip_unavailable`=`false` +is unavailable, disconnects during the search, or has search failures on +all shards. In all other cases, failures will result in partial results. + +Search failures on individual shards will be present in both the `_shards` +section and the `_clusters` section of the response. + +A failed search will have an additional top-level `errors` entry in the response. + +Here is an example of a search with partial results due to a failure on one shard +of one cluster. The search would be similar to ones shown previously. The +`_async_search/status` endpoint is used here to show the completion status and +not show the hits. + +[source,console] +-------------------------------------------------- +GET /_async_search/status/FmpwbThueVB4UkRDeUxqb1l4akIza3cbWEJyeVBPQldTV3FGZGdIeUVabXBldzoyMDIw +-------------------------------------------------- +// TEST[continued s/FmpwbThueVB4UkRDeUxqb1l4akIza3cbWEJyeVBPQldTV3FGZGdIeUVabXBldzoyMDIw/\${body.id}/] + + +Response: + +[source,console-result] +-------------------------------------------------- +{ + "id": "FmpwbThueVB4UkRDeUxqb1l4akIza3cbWEJyeVBPQldTV3FGZGdIeUVabXBldzoyMDIw", + "is_partial": true, <1> + "is_running": false, + "start_time_in_millis": 1692106901478, + "expiration_time_in_millis": 1692538901478, + "completion_time_in_millis": 1692106903547, + "response": { + "took": 2069, + "timed_out": false, + "num_reduce_phases": 4, + "_shards": { + "total": 28, + "successful": 27, + "skipped": 0, + "failed": 1, + "failures": [ <2> + { + "shard": 1, + "index": "cluster_two:my-index-000001", + "node": "LMpUnAu0QEeCUMfg_56sAg", + "reason": { + "type": "query_shard_exception", + "reason": "failed to create query: [my-index-000001][1] exception message here", + "index_uuid": "4F2VWx8RQSeIhUE-nksvCQ", + "index": "cluster_two:my-index-000001", + "caused_by": { + "type": "runtime_exception", + "reason": "runtime_exception: [my-index-000001][1] exception message here" + } + } + } + ] + }, + "_clusters": { + "total": 3, + "successful": 3, <3> + "skipped": 0, + "details": { + "(local)": { + "status": "successful", + "indices": "my-index-000001", + "took": 1753, + "timed_out": false, + "_shards": { + "total": 10, + "successful": 10, + "skipped": 0, + "failed": 0 + } + }, + "cluster_one": { + "status": "successful", + "indices": "my-index-000001", + "took": 2054, + "timed_out": false, + "_shards": { + "total": 12, + "successful": 12, + "skipped": 0, + "failed": 0 + } + }, + "cluster_two": { + "status": 
"partial", <4> + "indices": "my-index-000001", + "took": 2039, + "timed_out": false, + "_shards": { + "total": 6, + "successful": 5, + "skipped": 0, + "failed": 1 <5> + }, + "failures": [ <6> + { + "shard": 1, + "index": "cluster_two:my-index-000001", + "node": "LMpUnAu0QEeCUMfg_56sAg", + "reason": { + "type": "query_shard_exception", + "reason": "failed to create query: [my-index-000001][1] exception message here", + "index_uuid": "4F2VWx8RQSeIhUE-nksvCQ", + "index": "cluster_two:my-index-000001", + "caused_by": { + "type": "runtime_exception", + "reason": "runtime_exception: [my-index-000001][1] exception message here" + } + } + } + ] + } + } + }, + "hits": { + } + } +} +-------------------------------------------------- +// TEST[skip: hard to reproduce failure results] + + +<1> The search results are marked as partial, since at least one shard search failed. +<2> The `_shards` section includes shard failure info. +<3> Clusters that have partial results are still marked as successful. They are +marked with status "skipped" (or "failed") only if no data was returned from the search. +<4> The `partial` status has been applied to the cluster with partial results. +<5> The failed shard count is shown. +<6> The shard failures are listed under the cluster/details entry also. + + + +Here is an example where both `cluster_one` and `cluster_two` lost connectivity +during a {ccs}. Since `cluster_one` is marked as `skip_unavailable`=`true`, +its status is `skipped` and since `cluster_two` is marked as `skip_unavailable`=`false`, +its status is `failed`. Since there was a `failed` cluster, a top level `error` +is also present and this returns an HTTP status of 500 (not shown). + +If you want the search to still return results even when a cluster is +unavailable, set `skip_unavailable`=`true` for all the remote clusters. 
+ +[source,console] +-------------------------------------------------- +GET /_async_search/FjktRGJ1Y2w1U0phLTRhZnVyeUZ2MVEbWEJyeVBPQldTV3FGZGdIeUVabXBldzo5NzA4 +-------------------------------------------------- +// TEST[continued s/FjktRGJ1Y2w1U0phLTRhZnVyeUZ2MVEbWEJyeVBPQldTV3FGZGdIeUVabXBldzo5NzA4/\${body.id}/] + + +Response: + +[source,console-result] +-------------------------------------------------- +{ + "id": "FjktRGJ1Y2w1U0phLTRhZnVyeUZ2MVEbWEJyeVBPQldTV3FGZGdIeUVabXBldzo5NzA4", + "is_partial": true, + "is_running": false, + "start_time_in_millis": 1692112102650, + "expiration_time_in_millis": 1692544102650, + "completion_time_in_millis": 1692112106177, + "response": { + "took": 3527, + "timed_out": false, + "terminated_early": false, + "_shards": { + "total": 10, <1> + "successful": 10, + "skipped": 0, + "failed": 0 + }, + "_clusters": { + "total": 3, + "successful": 1, + "skipped": 2, <2> + "details": { + "(local)": { + "status": "successful", + "indices": "my-index-000001", + "took": 1473, + "timed_out": false, + "_shards": { + "total": 10, + "successful": 10, + "skipped": 0, + "failed": 0 + } + }, + "cluster_one": { + "status": "skipped", <3> + "indices": "my-index-000001", + "timed_out": false, + "failures": [ + { + "shard": -1, + "index": null, + "reason": { + "type": "node_disconnected_exception", <4> + "reason": "[myhostname1][35.238.149.1:9300][indices:data/read/search] disconnected" + } + } + ] + }, + "cluster_two": { + "status": "failed", <5> + "indices": "my-index-000001", + "timed_out": false, + "failures": [ + { + "shard": -1, + "index": null, + "reason": { + "type": "node_disconnected_exception", + "reason": "[myhostname2][35.238.149.2:9300][indices:data/read/search] disconnected" + } + } + ] + } + } + }, + "hits": { + }, + } + "error": { <6> + "type": "status_exception", + "reason": "error while executing search", + "caused_by": { + "type": "node_disconnected_exception", + "reason": "[myhostname2][35.238.149.2:9300][indices:data/read/search] disconnected" + } + } +} +-------------------------------------------------- +// TEST[skip: hard to reproduce failure results] + +<1> The shard accounting will often be only partial when errors like this occur, +since we need to be able to get shard info from remote clusters on each search. +<2> The skipped counter is used for both "skipped" and "failed" clusters. +<3> `cluster_one` disconnected during the search and it returned no results. +Since it is marked in the remote cluster configuration as `skip_unavailable`=`true`, +its status is "skipped", which will not fail the entire search. +<4> The failures list shows that the remote cluster node disconnected from the +querying cluster. +<5> `cluster_two` status is "failed", since it is marked in the remote cluster +configuration as `skip_unavailable`=`false`. +<6> A top level `error` entry is included when there is a "failed" cluster. + + +[discrete] +[[exclude-problematic-clusters]] +=== Excluding clusters or indices from a {ccs} + +If you use a wildcard to include a large list of clusters and/or indices, +you can explicitly exclude one or more clusters or indices with a `-` minus +sign in front of the cluster or index. + +To exclude an entire cluster, you would put the minus sign in front of the +cluster alias, such as: `-mycluster:*`. When excluding a cluster, you must +use `*` in the index position or an error will be returned. + +To exclude a specific remote index, you would put the minus sign in front +of the index, such as `mycluster:-myindex`. 
+ +*Exclude a remote cluster* + +Here's how you would exclude `cluster_three` from a +{ccs} that uses a wildcard to specify a list of clusters: + +[source,console] +-------------------------------------------------- +POST /my-index-000001,cluster*:my-index-000001,-cluster_three:*/_async_search <1> +{ + "query": { + "match": { + "user.id": "kimchy" + } + }, + "_source": ["user.id", "message", "http.response.status_code"] +} +-------------------------------------------------- +// TEST[continued] +// TEST[s/ccs_minimize_roundtrips=true/ccs_minimize_roundtrips=true&wait_for_completion_timeout=100ms&keep_on_completion=true/] + +<1> The `cluster*` notation would naturally include `cluster_one`, `cluster_two` and `cluster_three`. +To exclude `cluster_three`, use a `-` before the cluster name along with a simple wildcard `*` in +the index position. This indicates that you do not want the search to make any contact with +`cluster_three`. + + +*Exclude a remote index* + +Suppose you want to search all indices matching `my-index-*` but you want to exclude +`my-index-000001` on `cluster_three`. Here's how you could do that: + +[source,console] +-------------------------------------------------- +POST /my-index-000001,cluster*:my-index-*,cluster_three:-my-index-000001/_async_search <1> +{ + "query": { + "match": { + "user.id": "kimchy" + } + }, + "_source": ["user.id", "message", "http.response.status_code"] +} +-------------------------------------------------- +// TEST[continued] +// TEST[s/ccs_minimize_roundtrips=true/ccs_minimize_roundtrips=true&wait_for_completion_timeout=100ms&keep_on_completion=true/] + +<1> This will *not* exclude `cluster_three` from the search. It will still be +contacted and told to search any indices matching `my-index-*` except for +`my-index-000001`. + + + [discrete] [[ccs-async-search-minimize-roundtrips-false]] === Using async search for {ccs} with ccs_minimize_roundtrips=false -The `_shards` and `_clusters` section of the response behave differently -when `ccs_minimize_roundtrips` is `false` in asynchronous searches. +The `_shards` and `_clusters` sections of the response behave +differently when `ccs_minimize_roundtrips` is `false`. Key differences are: @@ -626,11 +1015,10 @@ of shards is gathered from all clusters before the search starts. shards complete, so you will get a more accurate accounting of progress during a long-running search compared to when minimize roundtrips is used. -. The `_cluster` section starts off in its final state, showing which clusters -were successful or skipped based on gathering shard information before the actual -search phase against each shard begins. +. The `_clusters` section starts off listing all of its shard counts, since +they are also obtained before the query phase begins. -Example using the same set up as in the previous section (`ccs_minimize_roundtrips=true`): +Example using the same setup as in the previous section (`ccs_minimize_roundtrips=true`): [source,console] -------------------------------------------------- @@ -670,8 +1058,43 @@ the `wait_for_completion_timeout` duration (see <>). 
}, "_clusters": { "total" : 3, - "successful": 3, <2> - "skipped": 0 + "successful": 0, + "skipped": 0, + "details": { <2> + "(local)": { + "status": "running", + "indices": "my-index-000001", + "timed_out": false, + "_shards": { + "total": 10, + "successful": 0, + "skipped": 0, + "failed": 0 + } + }, + "cluster_one": { + "status": "running", + "indices": "my-index-000001", + "timed_out": false, + "_shards": { + "total": 12, + "successful": 0, + "skipped": 0, + "failed": 0 + } + }, + "cluster_two": { + "status": "running", + "indices": "my-index-000001", + "timed_out": false, + "_shards": { + "total": 6, + "successful": 0, + "skipped": 0, + "failed": 0 + } + } + } }, "hits": { "total" : { @@ -684,34 +1107,31 @@ the `wait_for_completion_timeout` duration (see <>). } } -------------------------------------------------- -// TESTRESPONSE[s/FklQYndoTDJ2VEFlMEVBTzFJMGhJVFEaLVlKYndBWWZSMUdicUc4WVlEaFl4ZzoxNTU=/$body.id/] -// TESTRESPONSE[s/"is_partial": true/"is_partial": $body.is_partial/] -// TESTRESPONSE[s/"is_running": true/"is_running": $body.is_running/] -// TESTRESPONSE[s/1685563581380/$body.start_time_in_millis/] -// TESTRESPONSE[s/1685995581380/$body.expiration_time_in_millis/] -// TESTRESPONSE[s/"response"/"completion_time_in_millis": $body.completion_time_in_millis,\n "response"/] -// TESTRESPONSE[s/"max_score": null/"max_score": "$body.response.hits.max_score"/] -// TESTRESPONSE[s/\d+/$body.$_path/] -// TESTRESPONSE[s/"hits": \[\]/"hits": $body.response.hits.hits/] +// TEST[skip: hard to reproduce intermediate results] + <1> All shards from all clusters in scope for the search are listed here. Watch this -section for updates to monitor search progress. +section and/or the _clusters section for updates to monitor search progress. <2> The `_clusters` section shows that shard information was successfully -gathered from all 3 clusters and that all will be searched (none are being skipped). +gathered from all 3 clusters and the total shard count on each cluster is listed. + + [discrete] [[skip-unavailable-clusters]] === Optional remote clusters -By default, a {ccs} fails if a remote cluster in the request returns an -error or is unavailable. Use the `skip_unavailable` cluster -setting to mark a specific remote cluster as optional for {ccs}. +By default, a {ccs} fails if a remote cluster in the request is unavailable +or returns an error where the search on all shards failed. Use the +`skip_unavailable` cluster setting to mark a specific remote cluster as +optional for {ccs}. If `skip_unavailable` is `true`, a {ccs}: * Skips the remote cluster if its nodes are unavailable during the search. The -response's `_cluster.skipped` value contains a count of any skipped clusters. +response's `_clusters.skipped` value contains a count of any skipped clusters +and the `_clusters.details` section of the response will show a `skipped` status. * Ignores errors returned by the remote cluster, such as errors related to unavailable shards or indices. This can include errors related to search @@ -724,7 +1144,7 @@ when searching the remote cluster. This means searches on the remote cluster may return partial results. The following <> -API request changes `cluster_two`'s `skip_unavailable` setting to `true`. +API request changes `skip_unavailable` setting to `true` for `cluster_two`. 
[source,console] -------------------------------- PUT _cluster/settings { "persistent": { "cluster.remote.cluster_two.skip_unavailable": true } } -------------------------------- // TEST[continued] If `cluster_two` is disconnected or unavailable during a {ccs}, {es} won't -include matching documents from that cluster in the final results. +include matching documents from that cluster in the final results. If at +least one shard provides results, those results will be used and the +search will return partial data. (When running a {ccs} with async search, +the `is_partial` field is set to `true` to indicate partial results.) [discrete] [[ccs-network-delays]] @@ -774,6 +1197,25 @@ network roundtrips, and sets the parameter `ccs_minimize_roundtrips` to `false`. [discrete] [[ccs-min-roundtrips]] +==== Considerations for choosing whether to minimize roundtrips in a {ccs} + +For cross-cluster searches that query a large number of shards, the minimize roundtrips +option typically provides much better performance. This is especially true if the clusters +being searched have high network latency (e.g., distant geographic regions). + +However, not minimizing roundtrips allows async search to return incremental results of +any aggregations in your query while the search is still +running. + +By default, synchronous searches minimize roundtrips, while asynchronous searches +do not. You can override the default by using the `ccs_minimize_roundtrips` parameter, +setting it to either `true` or `false`, as shown in several examples earlier in this +document. + + +[discrete] +[[ccs-min-roundtrips-true]] + ==== Minimize network roundtrips Here's how {ccs} works when you minimize network roundtrips. @@ -886,8 +1328,8 @@ on the same version of {es}. If you need to maintain clusters with different versions, you can: * Maintain a dedicated cluster for {ccs}. Keep this cluster on the earliest -version needed to search the other clusters. For example, if you have 7.17 and 8.x clusters, you can maintain a dedicated 7.17 cluster to use -as the local cluster for {ccs}. +version needed to search the other clusters. For example, if you have 7.17 and 8.x clusters, +you can maintain a dedicated 7.17 cluster to use as the local cluster for {ccs}. * Keep each cluster no more than one minor version apart. This lets you use any cluster as the local cluster when running a {ccs}. diff --git a/docs/reference/search/search-your-data/semantic-search-elser.asciidoc b/docs/reference/search/search-your-data/semantic-search-elser.asciidoc index 257c3b5f7d6c2..642f6ac5afea1 100644 --- a/docs/reference/search/search-your-data/semantic-search-elser.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search-elser.asciidoc @@ -173,7 +173,7 @@ GET _tasks/ // TEST[skip:TBD] You can also open the Trained Models UI, select the Pipelines tab under ELSER to -follow the progress. It may take a couple of minutes to complete the process. +follow the progress. 
[discrete] @@ -361,3 +361,9 @@ PUT my-index * {ml-docs}/ml-nlp-elser.html[How to download and deploy ELSER] * {ml-docs}/ml-nlp-limitations.html#ml-nlp-elser-v1-limit-512[ELSER v1 limitation] * https://www.elastic.co/blog/may-2023-launch-information-retrieval-elasticsearch-ai-model[Improving information retrieval in the Elastic Stack: Introducing Elastic Learned Sparse Encoder, our new retrieval model] + +[discrete] +[[interactive-example]] +==== Interactive example + +* The `elasticsearch-labs` repo has an interactive example of running https://github.com/elastic/elasticsearch-labs/blob/main/notebooks/search/03-ELSER.ipynb[ELSER-powered semantic search] using the {es} Python client. diff --git a/docs/reference/search/search-your-data/semantic-search.asciidoc b/docs/reference/search/search-your-data/semantic-search.asciidoc index 1b1c59dc234b7..96281d12102bb 100644 --- a/docs/reference/search/search-your-data/semantic-search.asciidoc +++ b/docs/reference/search/search-your-data/semantic-search.asciidoc @@ -131,5 +131,7 @@ include::{es-repo-dir}/tab-widgets/semantic-search/hybrid-search-widget.asciidoc *** {blog-ref}improving-information-retrieval-elastic-stack-benchmarking-passage-retrieval[Part 2: Benchmarking passage retrieval] *** {blog-ref}may-2023-launch-information-retrieval-elasticsearch-ai-model[Part 3: Introducing Elastic Learned Sparse Encoder, our new retrieval model] *** {blog-ref}improving-information-retrieval-elastic-stack-hybrid[Part 4: Hybrid retrieval] +* Interactive examples: +** The https://github.com/elastic/elasticsearch-labs[`elasticsearch-labs`] repo contains a number of interactive semantic search examples in the form of executable Python notebooks, using the {es} Python client -include::semantic-search-elser.asciidoc[] \ No newline at end of file +include::semantic-search-elser.asciidoc[] diff --git a/docs/reference/settings/security-settings.asciidoc b/docs/reference/settings/security-settings.asciidoc index 49f4b726c7cea..de829bda62246 100644 --- a/docs/reference/settings/security-settings.asciidoc +++ b/docs/reference/settings/security-settings.asciidoc @@ -2578,6 +2578,7 @@ beta::[] :ssl-prefix: xpack.security.remote_cluster_server :component: Remote cluster server (API key based model) +:enabled-by-default: :client-auth-default: none :verifies!: :server: @@ -2593,6 +2594,7 @@ beta::[] :ssl-prefix: xpack.security.remote_cluster_client :component: Remote cluster client (API key based model) +:enabled-by-default: :client-auth-default: none :verifies: :server!: @@ -2645,13 +2647,14 @@ List of IP addresses to allow for this profile. (<>) List of IP addresses to deny for this profile. -// TODO: fix the link to new page of API key based remote clusters `xpack.security.remote_cluster.filter.allow`:: (<>) -beta:[] List of IP addresses to allow just for the remote cluster server. +beta:[] List of IP addresses to allow just for the +<>. `xpack.security.remote_cluster.filter.deny`:: (<>) -beta:[] List of IP addresses to deny just for the remote cluster server. +beta:[] List of IP addresses to deny just for the remote cluster server configured with +the <>. include::security-hash-settings.asciidoc[] diff --git a/docs/reference/settings/ssl-settings.asciidoc b/docs/reference/settings/ssl-settings.asciidoc index ada26b38fe13d..2c0eef7077f4d 100644 --- a/docs/reference/settings/ssl-settings.asciidoc +++ b/docs/reference/settings/ssl-settings.asciidoc @@ -1,11 +1,15 @@ ==== {component} TLS/SSL settings You can configure the following TLS/SSL settings. 
-ifdef::server[] +{ssl-prefix}.ssl.enabled+:: (<>) -Used to enable or disable TLS/SSL on the {ssl-layer}. The default is `false`. -endif::server[] +Used to enable or disable TLS/SSL on the {ssl-layer}. +ifdef::enabled-by-default[] +The default is `true`. +endif::enabled-by-default[] +ifndef::enabled-by-default[] +The default is `false`. +endif::enabled-by-default[] +{ssl-prefix}.ssl.supported_protocols+:: (<>) diff --git a/docs/reference/setup/install/deb.asciidoc b/docs/reference/setup/install/deb.asciidoc index dd58391c37369..edaed7c785489 100644 --- a/docs/reference/setup/install/deb.asciidoc +++ b/docs/reference/setup/install/deb.asciidoc @@ -30,12 +30,10 @@ wget -qO - https://artifacts.elastic.co/GPG-KEY-elasticsearch | sudo gpg --dearm ifeval::["{release-state}"=="unreleased"] -Version {version} of Elasticsearch has not yet been released. +WARNING: Version {version} of Elasticsearch has not yet been released. endif::[] -ifeval::["{release-state}"!="unreleased"] - You may need to install the `apt-transport-https` package on Debian before proceeding: [source,sh] @@ -52,8 +50,6 @@ ifeval::["{release-state}"=="released"] echo "deb [signed-by=/usr/share/keyrings/elasticsearch-keyring.gpg] https://artifacts.elastic.co/packages/{major-version}/apt stable main" | sudo tee /etc/apt/sources.list.d/elastic-{major-version}.list -------------------------------------------------- -endif::[] - ifeval::["{release-state}"=="prerelease"] ["source","sh",subs="attributes,callouts"] @@ -110,12 +106,10 @@ include::skip-set-kernel-parameters.asciidoc[] ifeval::["{release-state}"=="unreleased"] -Version {version} of Elasticsearch has not yet been released. +WARNING: Version {version} of Elasticsearch has not yet been released. endif::[] -ifeval::["{release-state}"!="unreleased"] - The Debian package for Elasticsearch v{version} can be downloaded from the website and installed as follows: ["source","sh",subs="attributes"] @@ -128,8 +122,6 @@ sudo dpkg -i elasticsearch-{version}-amd64.deb <1> Compares the SHA of the downloaded Debian package and the published checksum, which should output `elasticsearch-{version}-amd64.deb: OK`. -endif::[] - // Set a `distro` attribute so we can reuse files containing anchors :distro: deb [id="deb-security-configuration"] diff --git a/docs/reference/setup/install/docker.asciidoc b/docs/reference/setup/install/docker.asciidoc index 0abba259d46eb..d4ebacb292224 100644 --- a/docs/reference/setup/install/docker.asciidoc +++ b/docs/reference/setup/install/docker.asciidoc @@ -33,15 +33,11 @@ Docker image is currently available for this version. endif::[] -ifeval::["{release-state}"!="unreleased"] - [source,sh,subs="attributes"] ---- -docker pull {docker-repo}:{version} +docker pull {docker-image} ---- -endif::[] - [[docker-verify-signature]] ==== Optional: Verify the image signature @@ -57,8 +53,6 @@ Docker image signature is currently available for this version. endif::[] -ifeval::["{release-state}"!="unreleased"] - Install the appropriate https://docs.sigstore.dev/cosign/installation/[Cosign application] for your operating system. 
@@ -67,7 +61,7 @@ The container image signature for {es} v{version} can be verified as follows: ["source","sh",subs="attributes"] -------------------------------------------- wget https://artifacts.elastic.co/cosign.pub <1> -cosign verify --key cosign.pub {docker-repo}:{version} <2> +cosign verify --key cosign.pub {docker-image} <2> -------------------------------------------- <1> Download the Elastic public key to verify container signature <2> Verify the container against the Elastic public key @@ -76,22 +70,20 @@ The command prints the check results and the signature payload in JSON format: [source,sh,subs="attributes"] -------------------------------------------- -Verification for docker.elastic.co/elasticsearch/elasticsearch:{version} -- +Verification for {docker-image} -- The following checks were performed on each of these signatures: - The cosign claims were validated - Existence of the claims in the transparency log was verified offline - The signatures were verified against the specified public key -------------------------------------------- -endif::[] - [[docker-cli-run-dev-mode]] ==== Run {es} in Docker Use Docker commands to start a single-node {es} cluster for development or testing. You can then run additional Docker commands to add nodes to the test -cluster. +cluster or run {kib}. TIP: This setup doesn't run multiple {es} nodes or {kib} by default. To create a multi-node cluster with {kib}, use Docker Compose instead. See @@ -123,17 +115,15 @@ Docker image is currently available for this version. endif::[] -ifeval::["{release-state}"!="unreleased"] [source,sh,subs="attributes"] ---- docker run --name es01 --net elastic -p 9200:9200 -it -m 1GB {docker-image} ---- -TIP: Use the `-m` flag to set a memory limit for the container. +TIP: Use the `-m` flag to set a memory limit for the container. This removes the +need to <>. The command prints the `elastic` user password and an enrollment token for {kib}. - -endif::[] -- . Copy the generated `elastic` password and enrollment token. These credentials @@ -143,7 +133,7 @@ credentials using the following commands. -- [source,sh,subs="attributes"] ---- -docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-reset-password +docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana ---- @@ -193,13 +183,10 @@ Docker image is currently available for this version. endif::[] -ifeval::["{release-state}"!="unreleased"] [source,sh,subs="attributes"] ---- -docker run -e ENROLLMENT_TOKEN="" --name es02 --net elastic -it {docker-image} +docker run -e ENROLLMENT_TOKEN="" --name es02 --net elastic -it -m 1GB {docker-image} ---- - -endif::[] -- . Call the <> to verify the node was added to the cluster. @@ -210,17 +197,83 @@ curl --cacert http_ca.crt -u elastic:$ELASTIC_PASSWORD https://localhost:9200/_c ---- // NOTCONSOLE -===== Setting JVM heap size -If you experience issues where the container where your first node is running -exits when your second node starts, explicitly set values for the JVM heap size. -To <>, include the -`ES_JAVA_OPTS` variable and set values for `-Xms` and `-Xmx` when starting each -node. For example, the following command starts node `es02` and sets the -minimum and maximum JVM heap size to 1 GB: +[[run-kibana-docker]] +===== Run {kib} + +. Pull the {kib} Docker image. 
++ +-- +ifeval::["{release-state}"=="unreleased"] +WARNING: Version {version} of {kib} has not yet been released, so no +Docker image is currently available for this version. +endif::[] [source,sh,subs="attributes"] ---- -docker run -e ES_JAVA_OPTS="-Xms1g -Xmx1g" -e ENROLLMENT_TOKEN="" --name es02 -p 9201:9200 --net elastic -it {docker-image} +docker pull {kib-docker-image} +---- +-- + +. Optional: Verify the {kib} image's signature. ++ +-- +ifeval::["{release-state}"=="unreleased"] +WARNING: Version {version} of {kib} has not yet been released, so no +Docker image signature is currently available for this version. +endif::[] + +[source,sh,subs="attributes"] +---- +wget https://artifacts.elastic.co/cosign.pub +cosign verify --key cosign.pub {kib-docker-image} +---- +-- + +. Start a {kib} container. ++ +[source,sh,subs="attributes"] +---- +docker run --name kib01 --net elastic -p 5601:5601 {kib-docker-image} +---- + +. When {kib} starts, it outputs a unique generated link to the terminal. To +access {kib}, open this link in a web browser. + +. In your browser, enter the enrollment token that was generated when you started {es}. ++ +To regenerate the token, run: ++ +[source,sh] +---- +docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-create-enrollment-token -s kibana +---- + +. Log in to {kib} as the `elastic` user with the password that was generated +when you started {es}. ++ +To regenerate the password, run: ++ +[source,sh] +---- +docker exec -it es01 /usr/share/elasticsearch/bin/elasticsearch-reset-password -u elastic +---- + +[[remove-containers-docker]] +===== Remove containers + +To remove the containers and their network, run: + +[source,sh,subs="attributes"] +---- +# Remove the Elastic network +docker network rm elastic + +# Remove {es} containers +docker rm es01 +docker rm es02 + +# Remove the {kib} container +docker rm kib01 ---- ===== Next steps @@ -265,15 +318,13 @@ repository on GitHub. -- ifeval::["{release-state}"=="unreleased"] -NOTE: Version {version} of {es} has not been released, -so the sample Docker Compose and configuration files are not yet available for -this version. See the {stack-gs-current}/get-started-docker.html[current version] -for the latest sample files. +WARNING: Version {version} of {es} has not been released, +so the following Docker Compose and configuration files won't work. +See the {stack-gs-current}/get-started-docker.html[current version] +for the latest working files. endif::[] -- --- -ifeval::["{release-state}"!="unreleased"] [discrete] [[docker-env-file]] @@ -315,8 +366,6 @@ then only be accessible from the host machine itself. include::docker/docker-compose.yml[] ---- -endif::[] --- ===== Start your cluster with security enabled and configured @@ -426,12 +475,12 @@ sudo sysctl -w vm.max_map_count=262144 ====== Windows with https://docs.docker.com/docker-for-windows/wsl[Docker Desktop WSL 2 backend] The `vm.max_map_count` setting must be set in the "docker-desktop" WSL instance before the -ElasticSearch container will properly start. There are several ways to do this, depending +{es} container will properly start. There are several ways to do this, depending on your version of Windows and your version of WSL. 
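One quick but non-persistent option, sketched here on the assumption that the Docker Desktop WSL distribution keeps its default `docker-desktop` name, is to set the value directly in that instance from a Windows prompt:

[source,sh]
----
wsl -d docker-desktop sh -c "sysctl -w vm.max_map_count=262144"
----

The value is reset each time WSL restarts, which is why the options below matter.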
If you are on Windows 10 before version 22H2, or if you are on Windows 10 version 22H2 using the built-in version of WSL, you must either manually set it every time you restart Docker before starting -your ElasticSearch container, or (if you do not wish to do so on every restart) you must globally set +your {es} container, or (if you do not wish to do so on every restart) you must globally set every WSL2 instance to have the `vm.max_map_count` changed. This is because these versions of WSL do not properly process the /etc/sysctl.conf file. @@ -516,9 +565,9 @@ for the Docker daemon sets them to acceptable values. To check the Docker daemon defaults for ulimits, run: -[source,sh] +[source,sh,subs="attributes"] -------------------------------------------- -docker run --rm docker.elastic.co/elasticsearch/elasticsearch:{version} /bin/bash -c 'ulimit -Hn && ulimit -Sn && ulimit -Hu && ulimit -Su' +docker run --rm {docker-image} /bin/bash -c 'ulimit -Hn && ulimit -Sn && ulimit -Hu && ulimit -Su' -------------------------------------------- If needed, adjust them in the Daemon or override them per container. @@ -565,16 +614,20 @@ options>> file under `/usr/share/elasticsearch/config/jvm.options.d` that includes your desired <> settings. For testing, you can also manually set the heap size using the `ES_JAVA_OPTS` -environment variable. For example, to use 16GB, specify `-e -ES_JAVA_OPTS="-Xms16g -Xmx16g"` with `docker run`. The `ES_JAVA_OPTS` variable -overrides all other JVM options. We do not recommend using `ES_JAVA_OPTS` in -production. The `docker-compose.yml` file above sets the heap size to 512MB. +environment variable. For example, to use 1GB, use the following command. + +[source,sh,subs="attributes"] +---- +docker run -e ES_JAVA_OPTS="-Xms1g -Xmx1g" -e ENROLLMENT_TOKEN="" --name es01 -p 9200:9200 --net elastic -it {docker-image} +---- +The `ES_JAVA_OPTS` variable overrides all other JVM options. +We do not recommend using `ES_JAVA_OPTS` in production. ===== Pin deployments to a specific image version Pin your deployments to a specific version of the {es} Docker image. For -example +docker.elastic.co/elasticsearch/elasticsearch:{version}+. +example +{docker-image}+. ===== Always bind data volumes @@ -687,14 +740,13 @@ instead. The command must: * Use the `elasticsearch-keystore` tool with the `create -p` option. You'll be prompted to enter a password for the keystore. -ifeval::["{release-state}"!="unreleased"] For example: [source,sh,subs="attributes"] ---- docker run -it --rm \ -v full_path_to/config:/usr/share/elasticsearch/config \ -docker.elastic.co/elasticsearch/elasticsearch:{version} \ +{docker-image} \ bin/elasticsearch-keystore create -p ---- @@ -706,12 +758,11 @@ encrypted, you'll also be prompted to enter the keystore password. ---- docker run -it --rm \ -v full_path_to/config:/usr/share/elasticsearch/config \ -docker.elastic.co/elasticsearch/elasticsearch:{version} \ +{docker-image} \ bin/elasticsearch-keystore \ add my.secure.setting \ my.other.secure.setting ---- -endif::[] If you've already created the keystore and don't need to update it, you can bind-mount the `elasticsearch.keystore` file directly. You can use the @@ -732,7 +783,7 @@ your configuration. 
A `Dockerfile` to achieve this might be as simple as: [source,sh,subs="attributes"] -------------------------------------------- -FROM docker.elastic.co/elasticsearch/elasticsearch:{version} +FROM {docker-image} COPY --chown=elasticsearch:elasticsearch elasticsearch.yml /usr/share/elasticsearch/config/ -------------------------------------------- diff --git a/docs/reference/setup/install/rpm.asciidoc b/docs/reference/setup/install/rpm.asciidoc index 4dda986207f6c..8dfbca8c63210 100644 --- a/docs/reference/setup/install/rpm.asciidoc +++ b/docs/reference/setup/install/rpm.asciidoc @@ -34,12 +34,10 @@ rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch ifeval::["{release-state}"=="unreleased"] -Version {version} of Elasticsearch has not yet been released. +WARNING: Version {version} of Elasticsearch has not yet been released. endif::[] -ifeval::["{release-state}"!="unreleased"] - Create a file called `elasticsearch.repo` in the `/etc/yum.repos.d/` directory for RedHat based distributions, or in the `/etc/zypp/repos.d/` directory for OpenSuSE based distributions, containing: @@ -58,8 +56,6 @@ autorefresh=1 type=rpm-md -------------------------------------------------- -endif::[] - ifeval::["{release-state}"=="prerelease"] ["source","sh",subs="attributes,callouts"] @@ -101,12 +97,10 @@ endif::[] ifeval::["{release-state}"=="unreleased"] -Version {version} of Elasticsearch has not yet been released. +WARNING: Version {version} of Elasticsearch has not yet been released. endif::[] -ifeval::["{release-state}"!="unreleased"] - The RPM for Elasticsearch v{version} can be downloaded from the website and installed as follows: ["source","sh",subs="attributes"] @@ -119,8 +113,6 @@ sudo rpm --install elasticsearch-{version}-x86_64.rpm <1> Compares the SHA of the downloaded RPM and the published checksum, which should output `elasticsearch-{version}-x86_64.rpm: OK`. -endif::[] - include::skip-set-kernel-parameters.asciidoc[] // Set a `distro` attribute so we can reuse files containing anchors diff --git a/docs/reference/setup/install/targz.asciidoc b/docs/reference/setup/install/targz.asciidoc index ae5d72a6dd43a..c3dd6ad354d9d 100644 --- a/docs/reference/setup/install/targz.asciidoc +++ b/docs/reference/setup/install/targz.asciidoc @@ -19,12 +19,10 @@ see the <> ifeval::["{release-state}"=="unreleased"] -Version {version} of {es} has not yet been released. +WARNING: Version {version} of {es} has not yet been released. endif::[] -ifeval::["{release-state}"!="unreleased"] - The Linux archive for {es} v{version} can be downloaded and installed as follows: ["source","sh",subs="attributes"] @@ -39,19 +37,15 @@ cd elasticsearch-{version}/ <2> `elasticsearch-{version}-linux-x86_64.tar.gz: OK`. <2> This directory is known as `$ES_HOME`. -endif::[] - [[install-macos]] ==== Download and install archive for MacOS ifeval::["{release-state}"=="unreleased"] -Version {version} of {es} has not yet been released. +WARNING: Version {version} of {es} has not yet been released. endif::[] -ifeval::["{release-state}"!="unreleased"] - The MacOS archive for {es} v{version} can be downloaded and installed as follows: ["source","sh",subs="attributes"] @@ -67,8 +61,6 @@ cd elasticsearch-{version}/ <2> `elasticsearch-{version}-darwin-x86_64.tar.gz: OK`. <2> This directory is known as `$ES_HOME`. 
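From `$ES_HOME`, the node can then be started. This is a minimal sketch; the full startup and security discussion appears later on this page:

[source,sh]
----
./bin/elasticsearch
----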
-endif::[] - ifdef::include-xpack[] [role="xpack"] [[targz-enable-indices]] diff --git a/docs/reference/setup/install/zip-windows.asciidoc b/docs/reference/setup/install/zip-windows.asciidoc index 95341189c67fa..7c3aab0bb89d8 100644 --- a/docs/reference/setup/install/zip-windows.asciidoc +++ b/docs/reference/setup/install/zip-windows.asciidoc @@ -29,12 +29,10 @@ see the <> ifeval::["{release-state}"=="unreleased"] -Version {version} of {es} has not yet been released. +WARNING: Version {version} of {es} has not yet been released. endif::[] -ifeval::["{release-state}"!="unreleased"] - Download the `.zip` archive for {es} {version} from: https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-{version}-windows-x86_64.zip Unzip it with your favorite unzip tool. This will create a folder called @@ -46,8 +44,6 @@ window, `cd` to the `%ES_HOME%` directory, for instance: cd C:\elasticsearch-{version} ---------------------------- -endif::[] - ifdef::include-xpack[] [role="xpack"] [[windows-enable-indices]] diff --git a/docs/reference/setup/run-elasticsearch-locally.asciidoc b/docs/reference/setup/run-elasticsearch-locally.asciidoc index 66152933b0e20..a6e6d5c8963a2 100644 --- a/docs/reference/setup/run-elasticsearch-locally.asciidoc +++ b/docs/reference/setup/run-elasticsearch-locally.asciidoc @@ -34,10 +34,9 @@ Desktop]. Go to **Preferences > Resources > Advanced** and set Memory to at leas . Start an Elasticsearch container: ifeval::["{release-state}"=="unreleased"] + -NOTE: Version {version} of {es} has not yet been released, so no +WARNING: Version {version} of {es} has not yet been released, so no Docker image is currently available for this version. endif::[] -ifeval::["{release-state}"!="unreleased"] + [source,sh,subs="attributes"] ---- @@ -45,7 +44,6 @@ docker network create elastic docker pull docker.elastic.co/elasticsearch/elasticsearch:{version} docker run --name elasticsearch --net elastic -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" -t docker.elastic.co/elasticsearch/elasticsearch:{version} ---- -endif::[] + When you start Elasticsearch for the first time, the generated `elastic` user password and Kibana enrollment token are output to the terminal. @@ -65,17 +63,15 @@ Kibana enables you to easily send requests to Elasticsearch and analyze, visuali . In a new terminal session, start Kibana and connect it to your Elasticsearch container: ifeval::["{release-state}"=="unreleased"] + -NOTE: Version {version} of {kib} has not yet been released, so no +WARNING: Version {version} of {kib} has not yet been released, so no Docker image is currently available for this version. endif::[] -ifeval::["{release-state}"!="unreleased"] + [source,sh,subs="attributes"] ---- docker pull docker.elastic.co/kibana/kibana:{version} docker run --name kibana --net elastic -p 5601:5601 docker.elastic.co/kibana/kibana:{version} ---- -endif::[] + When you start Kibana, a unique URL is output to your terminal. diff --git a/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc b/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc index 4d578b3df489d..a50d4e3311937 100644 --- a/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/put-repo-api.asciidoc @@ -17,6 +17,8 @@ PUT /_snapshot/my_repository } ---- +IMPORTANT: If you're migrating {ref}/searchable-snapshots.html[searchable snapshots], the repository's name must be identical in the source and destination clusters. 
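As an illustrative sketch (the repository type and `bucket` value here are hypothetical placeholders), the destination cluster would register the repository under exactly the same name:

[source,console]
----
PUT _snapshot/my_repository
{
  "type": "s3",
  "settings": {
    "bucket": "my-bucket"
  }
}
----
// TEST[skip:TBD]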
+ [[put-snapshot-repo-api-request]] ==== {api-request-title} diff --git a/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc b/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc index 5c1b38e779880..974443e625de3 100644 --- a/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc +++ b/docs/reference/snapshot-restore/apis/restore-snapshot-api.asciidoc @@ -29,7 +29,7 @@ PUT /index_4 PUT _snapshot/my_repository/snapshot_2?wait_for_completion=true { - "indices": "index_3,index_4", + "indices": "index_1,index_2", "ignore_unavailable": true, "include_global_state": false, "metadata": { @@ -230,6 +230,9 @@ Defines the rename replacement string. See <> API reports +`no_valid_shard_copy`. + +The following request <> `index_1` and then restores it +in-place from the `snapshot_2` snapshot in the `my_repository` repository. + +[source,console] +---- +POST index_1/_close + +POST /_snapshot/my_repository/snapshot_2/_restore?wait_for_completion=true +{ + "indices": "index_1" +} +---- diff --git a/docs/reference/tab-widgets/quick-start-cleanup-widget.asciidoc b/docs/reference/tab-widgets/quick-start-cleanup-widget.asciidoc deleted file mode 100644 index 2950e6a7d44f6..0000000000000 --- a/docs/reference/tab-widgets/quick-start-cleanup-widget.asciidoc +++ /dev/null @@ -1,40 +0,0 @@ -++++ -
-
- - -
-
-++++ - -include::quick-start-cleanup.asciidoc[tag=cloud] - -++++ -
- -
-++++ diff --git a/docs/reference/tab-widgets/quick-start-cleanup.asciidoc b/docs/reference/tab-widgets/quick-start-cleanup.asciidoc deleted file mode 100644 index c88164037f975..0000000000000 --- a/docs/reference/tab-widgets/quick-start-cleanup.asciidoc +++ /dev/null @@ -1,24 +0,0 @@ - -// tag::cloud[] -Click **Delete deployment** from the deployment overview page and follow the -prompts. -// end::cloud[] - -// tag::self-managed[] -To stop your {es} and {kib} Docker containers, run: - -[source,sh] ----- -docker stop es01 -docker stop kibana ----- - -To remove the containers and their network, run: - -[source,sh] ----- -docker network rm elastic -docker rm es01 -docker rm kibana ----- -// end::self-managed[] diff --git a/docs/reference/tab-widgets/quick-start-install-widget.asciidoc b/docs/reference/tab-widgets/quick-start-install-widget.asciidoc deleted file mode 100644 index 25f2c8a012b80..0000000000000 --- a/docs/reference/tab-widgets/quick-start-install-widget.asciidoc +++ /dev/null @@ -1,40 +0,0 @@ -++++ -
-
- - -
-
-++++ - -include::quick-start-install.asciidoc[tag=cloud] - -++++ -
- -
-++++ diff --git a/docs/reference/tab-widgets/quick-start-install.asciidoc b/docs/reference/tab-widgets/quick-start-install.asciidoc deleted file mode 100644 index 3860488ef74e2..0000000000000 --- a/docs/reference/tab-widgets/quick-start-install.asciidoc +++ /dev/null @@ -1,88 +0,0 @@ - -// tag::cloud[] -include::{docs-root}/shared/cloud/ess-getting-started.asciidoc[tag=generic] - -. Click **Continue** to open {kib}. - -. Click **Explore on my own**. -// end::cloud[] - -// tag::self-managed[] -**Install and run {es}** - -ifeval::["{release-state}"=="unreleased"] -NOTE: No Docker image is currently available for {es} {version}. -endif::[] - -ifeval::["{release-state}"!="unreleased"] - -. Install and start https://www.docker.com/products/docker-desktop[Docker -Desktop]. - -. Run: -+ -[source,sh,subs="attributes"] ----- -docker network create elastic -docker pull {docker-repo}:{version} -docker run --name es01 --net elastic -p 9200:9200 -p 9300:9300 -it {docker-image} ----- -+ -When you start {es} for the first time, the following security configuration -occurs automatically: -+ --- -* <> are generated -for the transport and HTTP layers. -* The Transport Layer Security (TLS) configuration settings are written to -`elasticsearch.yml`. -* A password is generated for the `elastic` user. -* An enrollment token is generated for {kib}. - -NOTE: You might need to scroll back a bit in the terminal to view the password -and enrollment token. --- - -. Copy the generated password and enrollment token and save them in a secure -location. These values are shown only when you start {es} for the first time. -You'll use these to enroll {kib} with your {es} cluster and log in. -+ -[NOTE] -==== -If you need to reset the password for the `elastic` user or other -built-in users, run the <> tool. -To generate new enrollment tokens for {kib} or {es} nodes, run the -<> tool. -These tools are available in the {es} `bin` directory. -==== -endif::[] - -**Install and run {kib}** - -To analyze, visualize, and manage {es} data using an intuitive UI, install -{kib}. - -ifeval::["{release-state}"=="unreleased"] -NOTE: No Docker image is currently available for {kib} {version}. -endif::[] - -ifeval::["{release-state}"!="unreleased"] - -. In a new terminal session, run: -+ -["source","txt",subs="attributes"] ----- -docker pull docker.elastic.co/kibana/kibana:{version} -docker run --name kibana --net elastic -p 5601:5601 docker.elastic.co/kibana/kibana:{version} ----- -+ -When you start {kib}, a unique link is output to your terminal. - -. To access {kib}, click the generated link in your terminal. - - .. In your browser, paste the enrollment token that you copied and click the button to connect your {kib} instance with {es}. - - .. Log in to {kib} as the `elastic` user with the password that was generated when you started {es}. - -endif::[] -// end::self-managed[] diff --git a/docs/reference/troubleshooting.asciidoc b/docs/reference/troubleshooting.asciidoc index edab1d40dee8c..edd73ba393c38 100644 --- a/docs/reference/troubleshooting.asciidoc +++ b/docs/reference/troubleshooting.asciidoc @@ -48,14 +48,15 @@ fix problems that an {es} deployment might encounter. 
[discrete] [[troubleshooting-others]] -=== Others -* <> +=== Other issues +* <> * <> * <> * <> * <> * <> -* <> +* <> +* <> If none of these solutions relate to your issue, you can still get help: @@ -116,6 +117,8 @@ include::troubleshooting/snapshot/add-repository.asciidoc[] include::troubleshooting/snapshot/repeated-snapshot-failures.asciidoc[] +include::troubleshooting/troubleshooting-unstable-cluster.asciidoc[] + include::troubleshooting/discovery-issues.asciidoc[] include::monitoring/troubleshooting.asciidoc[] diff --git a/docs/reference/troubleshooting/common-issues/hotspotting.asciidoc b/docs/reference/troubleshooting/common-issues/hotspotting.asciidoc index f4b5ce3deb74b..a8ca4c7d851d1 100644 --- a/docs/reference/troubleshooting/common-issues/hotspotting.asciidoc +++ b/docs/reference/troubleshooting/common-issues/hotspotting.asciidoc @@ -259,8 +259,14 @@ direct indices:data/read/eql 10m node_1 true // TEST[skip:illustrative response only] This surfaces a problematic <>. We can gain -further insight on it via <>. Its response -contains a `description` that reports this query: +further insight on it via <>, + +[source,console] +---- +GET _tasks?human&detailed +---- + +Its response contains a `description` that reports this query: [source,eql] ---- diff --git a/docs/reference/troubleshooting/troubleshooting-unstable-cluster.asciidoc b/docs/reference/troubleshooting/troubleshooting-unstable-cluster.asciidoc new file mode 100644 index 0000000000000..387ebcdcd43c0 --- /dev/null +++ b/docs/reference/troubleshooting/troubleshooting-unstable-cluster.asciidoc @@ -0,0 +1,4 @@ +[[troubleshooting-unstable-cluster]] +== Troubleshooting an unstable cluster + +include::../modules/discovery/fault-detection.asciidoc[tag=troubleshooting,leveloffset=-2] \ No newline at end of file diff --git a/gradle/verification-metadata.xml b/gradle/verification-metadata.xml index 385049cdc78a7..ccb7b4dea36ee 100644 --- a/gradle/verification-metadata.xml +++ b/gradle/verification-metadata.xml @@ -656,9 +656,9 @@ - - - + + + diff --git a/libs/core/src/test/java/org/elasticsearch/jdk/JarHellTests.java b/libs/core/src/test/java/org/elasticsearch/jdk/JarHellTests.java index 222ca144098a7..6bcc1d4b8fb58 100644 --- a/libs/core/src/test/java/org/elasticsearch/jdk/JarHellTests.java +++ b/libs/core/src/test/java/org/elasticsearch/jdk/JarHellTests.java @@ -121,7 +121,7 @@ public void testDirAndJar() throws Exception { public void testNonJDKModuleURLs() throws Throwable { var bootLayer = ModuleLayer.boot(); - Path fooDir = createTempDir(getTestName()); + Path fooDir = createTempDir(); Path fooJar = PathUtils.get(makeJar(fooDir, "foo.jar", null, "p/Foo.class").toURI()); var fooConfiguration = bootLayer.configuration().resolve(ModuleFinder.of(), ModuleFinder.of(fooJar), List.of("foo")); Set urls = JarHell.nonJDKModuleURLs(fooConfiguration).collect(Collectors.toSet()); diff --git a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java index 89c9a5ac62af9..1046a09f53197 100644 --- a/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java +++ b/libs/x-content/impl/src/main/java/org/elasticsearch/xcontent/provider/json/JsonXContentGenerator.java @@ -364,6 +364,31 @@ public void writeString(String value) throws IOException { } } + @Override + public void writeStringArray(String[] array) throws IOException { + try { + if (isFiltered()) { + 
// filtered serialization does not work correctly with the bulk array serializer, so we need to fall back to serializing + // the array one-by-one + // TODO: this can probably be removed after upgrading Jackson to 2.15.1 or later, see + // https://github.com/FasterXML/jackson-core/issues/1023 + writeStringArrayFiltered(array); + } else { + generator.writeArray(array, 0, array.length); + } + } catch (JsonGenerationException e) { + throw new XContentGenerationException(e); + } + } + + private void writeStringArrayFiltered(String[] array) throws IOException { + writeStartArray(); + for (String s : array) { + writeString(s); + } + writeEndArray(); + } + @Override public void writeString(char[] value, int offset, int len) throws IOException { try { diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java index 5dc72f3e667dd..d63c61eea876c 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentBuilder.java @@ -740,11 +740,7 @@ private XContentBuilder values(String[] values) throws IOException { if (values == null) { return nullValue(); } - startArray(); - for (String s : values) { - value(s); - } - endArray(); + generator.writeStringArray(values); return this; } @@ -884,6 +880,14 @@ public XContentBuilder field(String name, Object value) throws IOException { return field(name).value(value); } + public XContentBuilder field(String name, Collection value) throws IOException { + return stringListField(name, value); + } + + public XContentBuilder field(String name, String[] value) throws IOException { + return array(name, value); + } + public XContentBuilder field(String name, Number value) throws IOException { field(name); if (value instanceof Short) { @@ -964,10 +968,14 @@ public XContentBuilder field(String name, ToXContent value, ToXContent.Params pa return field(name).value(value, params); } - private XContentBuilder value(ToXContent value) throws IOException { + public XContentBuilder value(ToXContent value) throws IOException { return value(value, ToXContent.EMPTY_PARAMS); } + public XContentBuilder value(Map map) throws IOException { + return map(map); + } + private XContentBuilder value(ToXContent value, ToXContent.Params params) throws IOException { if (value == null) { return nullValue(); @@ -1036,7 +1044,7 @@ public XContentBuilder enumSet(String name, EnumSet values) throws IOExceptio return this; } - public XContentBuilder field(String name, Map values) throws IOException { + public XContentBuilder field(String name, Map values) throws IOException { return field(name).map(values); } @@ -1055,8 +1063,7 @@ public XContentBuilder stringStringMap(String name, Map values) } startObject(); for (Map.Entry value : values.entrySet()) { - field(value.getKey()); - value(value.getValue()); + generator.writeStringField(value.getKey(), value.getValue()); } return endObject(); } diff --git a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java index 92c2b781287ce..97739635932a3 100644 --- a/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java +++ b/libs/x-content/src/main/java/org/elasticsearch/xcontent/XContentGenerator.java @@ -76,6 +76,8 @@ public interface XContentGenerator extends Closeable, Flushable { void writeString(String value) throws 
IOException; + void writeStringArray(String[] array) throws IOException; + void writeString(char[] text, int offset, int len) throws IOException; void writeUTF8String(byte[] value, int offset, int length) throws IOException; diff --git a/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesNestedAggregationsIT.java b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesNestedAggregationsIT.java new file mode 100644 index 0000000000000..0eba8cd5ace14 --- /dev/null +++ b/modules/aggregations/src/internalClusterTest/java/org/elasticsearch/aggregations/bucket/TimeSeriesNestedAggregationsIT.java @@ -0,0 +1,229 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.aggregations.bucket; + +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.create.CreateIndexResponse; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.aggregations.AggregationIntegTestCase; +import org.elasticsearch.aggregations.bucket.timeseries.InternalTimeSeries; +import org.elasticsearch.aggregations.bucket.timeseries.TimeSeriesAggregationBuilder; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramAggregationBuilder; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.CardinalityAggregationBuilder; +import org.elasticsearch.search.aggregations.metrics.InternalCardinality; +import org.elasticsearch.search.aggregations.metrics.SumAggregationBuilder; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.hamcrest.Matchers; +import org.junit.Before; + +import java.io.IOException; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Set; +import java.util.TreeSet; +import java.util.function.Supplier; + +public class TimeSeriesNestedAggregationsIT extends AggregationIntegTestCase { + private static int numberOfDimensions; + private static int numberOfDocuments; + + private static final String FOO_DIM_VALUE = "foo".repeat(10); + private static final String BAR_DIM_VALUE = "bar".repeat(11); + private static final String BAZ_DIM_VALUE = "baz".repeat(12); + + @Before + public void setup() throws Exception { + numberOfDimensions = randomIntBetween(10, 20); + final XContentBuilder mapping = timeSeriesIndexMapping(); + long startMillis = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-01-01T00:00:00Z"); + long endMillis = 
DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-01-31T00:00:00Z"); + numberOfDocuments = randomIntBetween(100, 200); + final Iterator<Long> timestamps = getTimestamps(startMillis, endMillis, numberOfDocuments); + // NOTE: also use the last (changing) dimension to make sure documents are not all indexed in the same shard. + final String[] routingDimensions = new String[] { "dim_000000", formatDim(numberOfDimensions - 1) }; + assertTrue(prepareTimeSeriesIndex(mapping, startMillis, endMillis, routingDimensions).isAcknowledged()); + logger.info("Dimensions: " + numberOfDimensions + " docs: " + numberOfDocuments + " start: " + startMillis + " end: " + endMillis); + + final BulkRequestBuilder bulkIndexRequest = client().prepareBulk(); + for (int docId = 0; docId < numberOfDocuments; docId++) { + final XContentBuilder document = timeSeriesDocument(FOO_DIM_VALUE, BAR_DIM_VALUE, BAZ_DIM_VALUE, docId, timestamps::next); + bulkIndexRequest.add(client().prepareIndex("index").setOpType(DocWriteRequest.OpType.CREATE).setSource(document)); + } + + final BulkResponse bulkIndexResponse = bulkIndexRequest.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); + assertFalse(bulkIndexResponse.hasFailures()); + assertEquals(RestStatus.OK.getStatus(), client().admin().indices().prepareFlush("index").get().getStatus().getStatus()); + } + + private static XContentBuilder timeSeriesDocument( + final String fooDimValue, + final String barDimValue, + final String bazDimValue, + int docId, + final Supplier<Long> timestampSupplier + ) throws IOException { + final XContentBuilder docSource = XContentFactory.jsonBuilder(); + docSource.startObject(); + // NOTE: we assign dimensions in such a way that all of them have the same value except the last one. + // This way we are going to have just two time series (and two distinct tsid) and the last dimension identifies + // which time series the document belongs to. + for (int dimId = 0; dimId < numberOfDimensions - 1; dimId++) { + docSource.field(formatDim(dimId), fooDimValue); + } + docSource.field(formatDim(numberOfDimensions - 1), docId % 2 == 0 ?
barDimValue : bazDimValue); + docSource.field("counter_metric", docId + 1); + docSource.field("gauge_metric", randomDoubleBetween(1000.0, 2000.0, true)); + docSource.field("@timestamp", timestampSupplier.get()); + docSource.endObject(); + + return docSource; + } + + private CreateIndexResponse prepareTimeSeriesIndex( + final XContentBuilder mapping, + long startMillis, + long endMillis, + final String[] routingDimensions + ) { + return prepareCreate("index").setSettings( + Settings.builder() + .put("mode", "time_series") + .put("routing_path", String.join(",", routingDimensions)) + .put("index.number_of_shards", randomIntBetween(1, 3)) + .put("index.number_of_replicas", randomIntBetween(1, 3)) + .put("time_series.start_time", startMillis) + .put("time_series.end_time", endMillis) + .put(MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING.getKey(), numberOfDimensions + 1) + .put(MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), 4192) + .build() + ).setMapping(mapping).get(); + } + + private static Iterator<Long> getTimestamps(long startMillis, long endMillis, int numberOfDocs) { + final Set<Long> timestamps = new TreeSet<>(); + while (timestamps.size() < numberOfDocs) { + timestamps.add(randomLongBetween(startMillis, endMillis)); + } + return timestamps.iterator(); + } + + private static XContentBuilder timeSeriesIndexMapping() throws IOException { + final XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + builder.startObject("properties"); + for (int i = 0; i < numberOfDimensions; i++) { + builder.startObject(formatDim(i)); + builder.field("type", "keyword"); + builder.field("time_series_dimension", true); + builder.endObject(); + } + builder.startObject("counter_metric"); + builder.field("type", "double"); + builder.field("time_series_metric", "counter"); + builder.endObject(); + builder.startObject("gauge_metric"); + builder.field("type", "double"); + builder.field("time_series_metric", "gauge"); + builder.endObject(); + builder.endObject(); // properties + builder.endObject(); + return builder; + } + + private static String formatDim(int dimId) { + return String.format(Locale.ROOT, "dim_%06d", dimId); + } + + public void testTimeSeriesAggregation() { + final TimeSeriesAggregationBuilder timeSeries = new TimeSeriesAggregationBuilder("ts"); + final SearchResponse aggregationResponse = client().prepareSearch("index").addAggregation(timeSeries).setSize(0).get(); + final InternalTimeSeries ts = (InternalTimeSeries) aggregationResponse.getAggregations().asList().get(0); + assertTimeSeriesAggregation(ts); + } + + public void testSumByTsid() { + final TimeSeriesAggregationBuilder timeSeries = new TimeSeriesAggregationBuilder("ts").subAggregation( + new SumAggregationBuilder("sum").field("gauge_metric") + ); + final SearchResponse searchResponse = client().prepareSearch("index").setQuery(new MatchAllQueryBuilder()).get(); + assertNotEquals(numberOfDocuments, searchResponse.getHits().getHits().length); + final SearchResponse aggregationResponse = client().prepareSearch("index").addAggregation(timeSeries).setSize(0).get(); + final InternalTimeSeries ts = (InternalTimeSeries) aggregationResponse.getAggregations().asList().get(0); + assertTimeSeriesAggregation(ts); + } + + public void testTermsByTsid() { + final TimeSeriesAggregationBuilder timeSeries = new TimeSeriesAggregationBuilder("ts").subAggregation( + new TermsAggregationBuilder("terms").field("dim_0") + ); + final SearchResponse aggregationResponse =
client().prepareSearch("index").addAggregation(timeSeries).setSize(0).get(); + final InternalTimeSeries ts = (InternalTimeSeries) aggregationResponse.getAggregations().asList().get(0); + assertTimeSeriesAggregation(ts); + } + + public void testDateHistogramByTsid() { + final TimeSeriesAggregationBuilder timeSeries = new TimeSeriesAggregationBuilder("ts").subAggregation( + new DateHistogramAggregationBuilder("date_histogram").field("@timestamp").calendarInterval(DateHistogramInterval.HOUR) + ); + final SearchResponse aggregationResponse = client().prepareSearch("index").addAggregation(timeSeries).setSize(0).get(); + final InternalTimeSeries ts = (InternalTimeSeries) aggregationResponse.getAggregations().asList().get(0); + assertTimeSeriesAggregation(ts); + } + + public void testCardinalityByTsid() { + final TimeSeriesAggregationBuilder timeSeries = new TimeSeriesAggregationBuilder("ts").subAggregation( + new CardinalityAggregationBuilder("dim_n_cardinality").field(formatDim(numberOfDimensions - 1)) + ); + final SearchResponse aggregationResponse = client().prepareSearch("index").addAggregation(timeSeries).setSize(0).get(); + final InternalTimeSeries ts = (InternalTimeSeries) aggregationResponse.getAggregations().asList().get(0); + assertTimeSeriesAggregation(ts); + ts.getBuckets().forEach(bucket -> { assertCardinality(bucket.getAggregations().get("dim_n_cardinality"), 1); }); + } + + private static void assertTimeSeriesAggregation(final InternalTimeSeries timeSeriesAggregation) { + final List<Map<String, Object>> dimensions = timeSeriesAggregation.getBuckets() + .stream() + .map(InternalTimeSeries.InternalBucket::getKey) + .toList(); + // NOTE: only two time series are expected, because the last dimension has just two distinct values + assertEquals(2, dimensions.size()); + + final Map<String, Object> firstTimeSeries = dimensions.get(0); + final Map<String, Object> secondTimeSeries = dimensions.get(1); + + assertTsid(firstTimeSeries); + assertTsid(secondTimeSeries); + } + + private static void assertTsid(final Map<String, Object> timeSeries) { + timeSeries.entrySet().stream().sorted(Map.Entry.comparingByKey()).limit(numberOfDimensions - 2).forEach(entry -> { + assertThat(entry.getValue().toString(), Matchers.equalTo(FOO_DIM_VALUE)); + }); + timeSeries.entrySet().stream().sorted(Map.Entry.comparingByKey()).skip(numberOfDimensions - 1).forEach(entry -> { + assertThat(entry.getValue().toString(), Matchers.oneOf(BAR_DIM_VALUE, BAZ_DIM_VALUE)); + }); + } + + private static void assertCardinality(final InternalCardinality cardinalityAggregation, int expectedCardinality) { + assertEquals(expectedCardinality, cardinalityAggregation.getValue()); + } +} diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java index db88685b5fbce..a25bbe0a6d0be 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/adjacency/AdjacencyMatrixAggregationBuilder.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.IndexSearcher; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.aggregations.bucket.adjacency.AdjacencyMatrixAggregator.KeyedFilter; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@
-262,6 +263,6 @@ public String getType() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java index cac35ce644bf7..83f7d496f698f 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/AutoDateHistogramAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.aggregations.bucket.histogram; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -122,7 +123,7 @@ public AutoDateHistogramAggregationBuilder(String name) { public AutoDateHistogramAggregationBuilder(StreamInput in) throws IOException { super(in); numBuckets = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { minimumIntervalExpression = in.readOptionalString(); } } @@ -130,7 +131,7 @@ public AutoDateHistogramAggregationBuilder(StreamInput in) throws IOException { @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeVInt(numBuckets); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { out.writeOptionalString(minimumIntervalExpression); } } @@ -268,7 +269,7 @@ public boolean equals(Object obj) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } public static class RoundingInfo implements Writeable { diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java index f181c34329864..4593d6901513a 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogram.java @@ -8,7 +8,7 @@ package org.elasticsearch.aggregations.bucket.histogram; import org.apache.lucene.util.PriorityQueue; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; @@ -224,9 +224,9 @@ public InternalAutoDateHistogram(StreamInput in) throws IOException { super(in); bucketInfo = new BucketInfo(in); format = in.readNamedWriteable(DocValueFormat.class); - buckets = in.readList(stream -> new Bucket(stream, format)); + buckets = in.readCollectionAsList(stream -> new Bucket(stream, format)); this.targetBuckets = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { bucketInnerInterval 
= in.readVLong(); } else { bucketInnerInterval = 1; // Calculated on merge. @@ -237,9 +237,9 @@ public InternalAutoDateHistogram(StreamInput in) throws IOException { protected void doWriteTo(StreamOutput out) throws IOException { bucketInfo.writeTo(out); out.writeNamedWriteable(format); - out.writeList(buckets); + out.writeCollection(buckets); out.writeVInt(targetBuckets); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { out.writeVLong(bucketInnerInterval); } } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregationBuilder.java index 405a382373752..e7ec383e637ef 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/bucket/timeseries/TimeSeriesAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.aggregations.bucket.timeseries; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; @@ -161,6 +162,6 @@ public int hashCode() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_1_0; + return TransportVersions.V_8_1_0; } } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregationBuilder.java index d8edb19c2782b..ee6157f92a82d 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/metric/MatrixStatsAggregationBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.aggregations.metric; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.MultiValueMode; @@ -55,14 +56,14 @@ public boolean supportsSampling() { */ public MatrixStatsAggregationBuilder(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { multiValueMode = MultiValueMode.readMultiValueModeFrom(in); } } @Override protected void innerWriteTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { multiValueMode.writeTo(out); } } @@ -99,6 +100,6 @@ public String getType() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSelectorPipelineAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSelectorPipelineAggregationBuilder.java index 1d8934a26251f..b53269608e078 100644 --- 
a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSelectorPipelineAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSelectorPipelineAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.aggregations.pipeline; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -55,7 +56,7 @@ public BucketSelectorPipelineAggregationBuilder(StreamInput in) throws IOExcepti @Override protected void doWriteTo(StreamOutput out) throws IOException { - out.writeMap(bucketsPathsMap, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(bucketsPathsMap, StreamOutput::writeString); script.writeTo(out); gapPolicy.writeTo(out); } @@ -215,6 +216,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilder.java index 80f9c66814f69..ad26d8ed59438 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/BucketSortPipelineAggregationBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.aggregations.pipeline; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; @@ -92,7 +93,7 @@ public BucketSortPipelineAggregationBuilder(String name, List */ public BucketSortPipelineAggregationBuilder(StreamInput in) throws IOException { super(in, NAME); - sorts = in.readList(FieldSortBuilder::new); + sorts = in.readCollectionAsList(FieldSortBuilder::new); from = in.readVInt(); size = in.readOptionalVInt(); gapPolicy = GapPolicy.readFrom(in); @@ -100,7 +101,7 @@ public BucketSortPipelineAggregationBuilder(StreamInput in) throws IOException { @Override protected void doWriteTo(StreamOutput out) throws IOException { - out.writeList(sorts); + out.writeCollection(sorts); out.writeVInt(from); out.writeOptionalVInt(size); gapPolicy.writeTo(out); @@ -194,6 +195,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/DerivativePipelineAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/DerivativePipelineAggregationBuilder.java index 32a8b5897ae87..b4ac7387c6955 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/DerivativePipelineAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/DerivativePipelineAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.aggregations.pipeline; import org.elasticsearch.TransportVersion; +import 
org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; @@ -257,6 +258,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_4_0; + return TransportVersions.V_7_4_0; } } diff --git a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java index 821869424c447..35d5a97aa854f 100644 --- a/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java +++ b/modules/aggregations/src/main/java/org/elasticsearch/aggregations/pipeline/MovFnPipelineAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.aggregations.pipeline; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -240,6 +241,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java index ecdf112a560ab..6b59aa6e84657 100644 --- a/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java +++ b/modules/aggregations/src/test/java/org/elasticsearch/aggregations/bucket/histogram/InternalAutoDateHistogramTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.aggregations.bucket.histogram; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.aggregations.bucket.AggregationMultiBucketAggregationTestCase; import org.elasticsearch.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder.RoundingInfo; import org.elasticsearch.aggregations.bucket.histogram.InternalAutoDateHistogram.BucketInfo; @@ -477,8 +478,8 @@ public void testSerializationPre830() throws IOException { ); TransportVersion version = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.MINIMUM_COMPATIBLE, - TransportVersionUtils.getPreviousVersion(TransportVersion.V_8_3_0) + TransportVersions.MINIMUM_COMPATIBLE, + TransportVersionUtils.getPreviousVersion(TransportVersions.V_8_3_0) ); InternalAutoDateHistogram deserialized = copyInstance(instance, version); assertEquals(1, deserialized.getBucketInnerInterval()); @@ -504,7 +505,7 @@ public void testReadFromPre830() throws IOException { + "AAyAAAAZAF5BHllYXIAAARib29sAQAAAAAAAAAKZAADAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" ); try (StreamInput in = new NamedWriteableAwareStreamInput(new BytesArray(bytes).streamInput(), getNamedWriteableRegistry())) { - in.setTransportVersion(TransportVersion.V_8_2_0); + in.setTransportVersion(TransportVersions.V_8_2_0); InternalAutoDateHistogram deserialized = new InternalAutoDateHistogram(in); assertEquals("name", deserialized.getName()); assertEquals(1, deserialized.getBucketInnerInterval()); diff --git 
a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java index c4f8915811aee..c6104e92b0b3e 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/CommonAnalysisPlugin.java @@ -322,6 +322,7 @@ public TokenStream create(TokenStream tokenStream) { filters.put("pattern_capture", requiresAnalysisSettings(PatternCaptureGroupTokenFilterFactory::new)); filters.put("pattern_replace", requiresAnalysisSettings(PatternReplaceTokenFilterFactory::new)); filters.put("persian_normalization", PersianNormalizationFilterFactory::new); + filters.put("persian_stem", PersianStemTokenFilterFactory::new); filters.put("porter_stem", PorterStemTokenFilterFactory::new); filters.put( "predicate_token_filter", diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PersianStemTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PersianStemTokenFilterFactory.java new file mode 100644 index 0000000000000..4fcf3fe896fbd --- /dev/null +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/PersianStemTokenFilterFactory.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.analysis.common; + +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.fa.PersianStemFilter; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.analysis.AbstractTokenFilterFactory; + +public class PersianStemTokenFilterFactory extends AbstractTokenFilterFactory { + + PersianStemTokenFilterFactory(IndexSettings indexSettings, Environment environment, String name, Settings settings) { + super(name, settings); + } + + @Override + public TokenStream create(TokenStream tokenStream) { + return new PersianStemFilter(tokenStream); + } +} diff --git a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java index 4ef2f837368c9..8f9a882e29d2a 100644 --- a/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java +++ b/modules/analysis-common/src/main/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactory.java @@ -23,6 +23,7 @@ import org.apache.lucene.analysis.en.KStemFilter; import org.apache.lucene.analysis.en.PorterStemFilter; import org.apache.lucene.analysis.es.SpanishLightStemFilter; +import org.apache.lucene.analysis.fa.PersianStemFilter; import org.apache.lucene.analysis.fi.FinnishLightStemFilter; import org.apache.lucene.analysis.fr.FrenchLightStemFilter; import org.apache.lucene.analysis.fr.FrenchMinimalStemFilter; @@ -213,6 +214,10 @@ public TokenStream create(TokenStream tokenStream) { } else if ("minimal_nynorsk".equalsIgnoreCase(language) || 
"minimalNynorsk".equalsIgnoreCase(language)) { return new NorwegianMinimalStemFilter(tokenStream, NorwegianLightStemmer.NYNORSK); + // Persian stemmers + } else if ("persian".equalsIgnoreCase(language)) { + return new PersianStemFilter(tokenStream); + // Portuguese stemmers } else if ("portuguese".equalsIgnoreCase(language)) { return new SnowballFilter(tokenStream, new PortugueseStemmer()); diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java index 777349ee81c93..f147cb47a2c01 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/CommonAnalysisFactoryTests.java @@ -117,6 +117,7 @@ protected Map> getTokenFilters() { filters.put("hindinormalization", HindiNormalizationFilterFactory.class); filters.put("indicnormalization", IndicNormalizationFilterFactory.class); filters.put("persiannormalization", PersianNormalizationFilterFactory.class); + filters.put("persianstem", PersianStemTokenFilterFactory.class); filters.put("scandinaviannormalization", ScandinavianNormalizationFilterFactory.class); filters.put("serbiannormalization", SerbianNormalizationFilterFactory.class); filters.put("soraninormalization", SoraniNormalizationFilterFactory.class); diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java index 24ef0844de5d0..f90b1fecd58a9 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/NGramTokenizerFactoryTests.java @@ -13,15 +13,15 @@ import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter; import org.apache.lucene.analysis.reverse.ReverseStringFilter; import org.apache.lucene.tests.analysis.MockTokenizer; -import org.elasticsearch.Version; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings.Builder; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ESTokenStreamTestCase; import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.index.IndexVersionUtils; import java.io.IOException; import java.io.StringReader; @@ -160,14 +160,14 @@ public void testBackwardsCompatibilityEdgeNgramTokenFilter() throws Exception { for (int i = 0; i < iters; i++) { final Index index = new Index("test", "_na_"); final String name = "ngr"; - Version v = VersionUtils.randomVersion(random()); + IndexVersion v = IndexVersionUtils.randomVersion(random()); Builder builder = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3); boolean reverse = random().nextBoolean(); if (reverse) { builder.put("side", "back"); } Settings settings = builder.build(); - Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetadata.SETTING_VERSION_CREATED, v.id).build(); + Settings indexSettings = newAnalysisSettingsBuilder().put(IndexMetadata.SETTING_VERSION_CREATED, v).build(); Tokenizer tokenizer = new MockTokenizer(); 
tokenizer.setReader(new StringReader("foo bar")); TokenStream edgeNGramTokenFilter = new EdgeNGramTokenFilterFactory( diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactoryTests.java index 4d0d4b65abdc1..c668a89e53b95 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/StemmerTokenFilterFactoryTests.java @@ -12,16 +12,16 @@ import org.apache.lucene.analysis.core.WhitespaceTokenizer; import org.apache.lucene.analysis.en.PorterStemFilter; import org.apache.lucene.analysis.snowball.SnowballFilter; -import org.elasticsearch.Version; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.analysis.AnalysisTestsHelper; import org.elasticsearch.index.analysis.IndexAnalyzers; import org.elasticsearch.index.analysis.NamedAnalyzer; import org.elasticsearch.index.analysis.TokenFilterFactory; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTokenStreamTestCase; -import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.index.IndexVersionUtils; import java.io.IOException; import java.io.StringReader; @@ -37,7 +37,7 @@ public class StemmerTokenFilterFactoryTests extends ESTokenStreamTestCase { public void testEnglishFilterFactory() throws IOException { int iters = scaledRandomIntBetween(20, 100); for (int i = 0; i < iters; i++) { - Version v = VersionUtils.randomVersion(random()); + IndexVersion v = IndexVersionUtils.randomVersion(random()); Settings settings = Settings.builder() .put("index.analysis.filter.my_english.type", "stemmer") .put("index.analysis.filter.my_english.language", "english") @@ -64,7 +64,7 @@ public void testPorter2FilterFactory() throws IOException { int iters = scaledRandomIntBetween(20, 100); for (int i = 0; i < iters; i++) { - Version v = VersionUtils.randomVersion(random()); + IndexVersion v = IndexVersionUtils.randomVersion(random()); Settings settings = Settings.builder() .put("index.analysis.filter.my_porter2.type", "stemmer") .put("index.analysis.filter.my_porter2.language", "porter2") @@ -88,7 +88,7 @@ public void testPorter2FilterFactory() throws IOException { } public void testMultipleLanguagesThrowsException() throws IOException { - Version v = VersionUtils.randomVersion(random()); + IndexVersion v = IndexVersionUtils.randomVersion(random()); Settings settings = Settings.builder() .put("index.analysis.filter.my_english.type", "stemmer") .putList("index.analysis.filter.my_english.language", "english", "light_english") diff --git a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java index cebfbd76cdf77..1ae499ba7e634 100644 --- a/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java +++ b/modules/analysis-common/src/test/java/org/elasticsearch/analysis/common/WordDelimiterGraphTokenFilterFactoryTests.java @@ -9,7 +9,6 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.core.WhitespaceTokenizer; -import org.elasticsearch.Version; 
import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.env.Environment; @@ -25,7 +24,7 @@ import org.elasticsearch.plugins.scanners.StablePluginsRegistry; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.IndexSettingsModule; -import org.elasticsearch.test.VersionUtils; +import org.elasticsearch.test.index.IndexVersionUtils; import java.io.IOException; import java.io.StringReader; @@ -186,7 +185,11 @@ public void testPreconfiguredFilter() throws IOException { Settings indexSettings = Settings.builder() .put( IndexMetadata.SETTING_VERSION_CREATED, - VersionUtils.randomVersionBetween(random(), Version.V_7_0_0, VersionUtils.getPreviousVersion(Version.V_7_3_0)) + IndexVersionUtils.randomVersionBetween( + random(), + IndexVersion.V_7_0_0, + IndexVersionUtils.getPreviousVersion(IndexVersion.V_7_3_0) + ) ) .put("index.analysis.analyzer.my_analyzer.tokenizer", "standard") .putList("index.analysis.analyzer.my_analyzer.filter", "word_delimiter_graph") diff --git a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java index 646b3b63104de..4e70f709a4263 100644 --- a/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java +++ b/modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.admin.indices.flush.FlushResponse; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsRequest; import org.elasticsearch.action.admin.indices.settings.get.GetSettingsResponse; @@ -65,7 +66,6 @@ import java.util.Set; import java.util.stream.Collectors; -import static org.elasticsearch.cluster.metadata.DataStream.getDefaultBackingIndexName; import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo; import static org.elasticsearch.cluster.metadata.IndexMetadata.APIBlock.READ_ONLY; import static org.elasticsearch.cluster.metadata.MetadataIndexTemplateService.DEFAULT_TIMESTAMP_FIELD; @@ -236,24 +236,10 @@ public void testUpdatingLifecycleAppliesToAllBackingIndices() throws Exception { String dataStreamName = "metrics-foo"; CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); + client().admin().indices().rolloverIndex(new RolloverRequest(dataStreamName, null)).actionGet(); + client().admin().indices().rolloverIndex(new RolloverRequest(dataStreamName, null)).actionGet(); + int finalGeneration = 3; - int finalGeneration = randomIntBetween(2, 20); - for (int currentGeneration = 1; currentGeneration < finalGeneration; currentGeneration++) { - indexDocs(dataStreamName, 1); - int currentBackingIndexCount = currentGeneration; - assertBusy(() -> { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); - 
GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) - .actionGet(); - assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); - DataStream dataStream = getDataStreamResponse.getDataStreams().get(0).getDataStream(); - assertThat(dataStream.getName(), equalTo(dataStreamName)); - List<Index> backingIndices = dataStream.getIndices(); - assertThat(backingIndices.size(), equalTo(currentBackingIndexCount + 1)); - String writeIndex = dataStream.getWriteIndex().getName(); - assertThat(writeIndex, backingIndexEqualTo(dataStreamName, currentBackingIndexCount + 1)); - }); - } // Update the lifecycle of the data stream updateLifecycle(dataStreamName, TimeValue.timeValueMillis(1)); // Verify that the retention has changed for all backing indices @@ -316,10 +302,10 @@ public void testAutomaticForceMerge() throws Exception { CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); - int finalGeneration = randomIntBetween(2, 10); + int finalGeneration = randomIntBetween(3, 4); for (int currentGeneration = 1; currentGeneration < finalGeneration; currentGeneration++) { // This is currently the write index, but it will be rolled over as soon as data stream lifecycle runs: - final String toBeRolledOverIndex = DataStream.getDefaultBackingIndexName(dataStreamName, currentGeneration); + final String toBeRolledOverIndex = getBackingIndices(dataStreamName).get(currentGeneration - 1); for (int i = 0; i < randomIntBetween(10, 50); i++) { indexDocs(dataStreamName, randomIntBetween(1, 300)); // Make sure the segments get written: @@ -331,7 +317,7 @@ public void testAutomaticForceMerge() throws Exception { if (currentGeneration == 1) { toBeForceMergedIndex = null; // Not going to be used } else { - toBeForceMergedIndex = DataStream.getDefaultBackingIndexName(dataStreamName, currentGeneration - 1); + toBeForceMergedIndex = getBackingIndices(dataStreamName).get(currentGeneration - 2); } int currentBackingIndexCount = currentGeneration; DataStreamLifecycleService dataStreamLifecycleService = internalCluster().getInstance( @@ -394,7 +380,6 @@ public void testErrorRecordingOnRollover() throws Exception { null, lifecycle ); - Iterable<DataStreamLifecycleService> dataLifecycleServices = internalCluster().getInstances(DataStreamLifecycleService.class); String dataStreamName = "metrics-foo"; CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); @@ -423,7 +408,7 @@ indexDocs(dataStreamName, 1); assertBusy(() -> { - String writeIndexName = DataStream.getDefaultBackingIndexName(dataStreamName, 2); + String writeIndexName = getBackingIndices(dataStreamName).get(1); String writeIndexRolloverError = null; Iterable<DataStreamLifecycleService> lifecycleServices = internalCluster().getInstances(DataStreamLifecycleService.class); @@ -442,20 +427,15 @@ updateClusterSettings(Settings.builder().putNull("*")); assertBusy(() -> { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); - GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) - .actionGet(); - assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); -
assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getName(), equalTo(dataStreamName)); - List<Index> backingIndices = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices(); + List<String> backingIndices = getBackingIndices(dataStreamName); assertThat(backingIndices.size(), equalTo(3)); - String writeIndex = backingIndices.get(2).getName(); + String writeIndex = backingIndices.get(2); // rollover was successful and we got to generation 3 assertThat(writeIndex, backingIndexEqualTo(dataStreamName, 3)); // we recorded the error against the previous write index (generation 2) // let's check there's no error recorded against it anymore - String previousWriteInddex = DataStream.getDefaultBackingIndexName(dataStreamName, 2); + String previousWriteInddex = backingIndices.get(1); Iterable<DataStreamLifecycleService> lifecycleServices = internalCluster().getInstances(DataStreamLifecycleService.class); for (DataStreamLifecycleService lifecycleService : lifecycleServices) { @@ -482,7 +462,6 @@ public void testErrorRecordingOnRetention() throws Exception { null, lifecycle ); - Iterable<DataStreamLifecycleService> dataLifecycleServices = internalCluster().getInstances(DataStreamLifecycleService.class); String dataStreamName = "metrics-foo"; CreateDataStreamAction.Request createDataStreamRequest = new CreateDataStreamAction.Request(dataStreamName); @@ -504,7 +483,7 @@ assertThat(writeIndex, backingIndexEqualTo(dataStreamName, 2)); }); - String firstGenerationIndex = DataStream.getDefaultBackingIndexName(dataStreamName, 1L); + String firstGenerationIndex = getBackingIndices(dataStreamName).get(0); // mark the first generation index as read-only so deletion fails when we enable the retention configuration updateIndexSettings(Settings.builder().put(READ_ONLY.settingName(), true), firstGenerationIndex); @@ -598,7 +577,7 @@ public void testDataLifecycleServiceConfiguresTheMergePolicy() throws Exception assertThat(writeIndex, backingIndexEqualTo(dataStreamName, 2)); }); - String firstGenerationIndex = DataStream.getDefaultBackingIndexName(dataStreamName, 1L); + String firstGenerationIndex = getBackingIndices(dataStreamName).get(0); ClusterGetSettingsAction.Response response = client().execute( ClusterGetSettingsAction.INSTANCE, new ClusterGetSettingsAction.Request() @@ -636,16 +615,11 @@ public void testDataLifecycleServiceConfiguresTheMergePolicy() throws Exception // let's allow one rollover to go through assertBusy(() -> { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); - GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) - .actionGet(); - assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); - assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getName(), equalTo(dataStreamName)); - List<Index> backingIndices = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices(); + List<String> backingIndices = getBackingIndices(dataStreamName); assertThat(backingIndices.size(), equalTo(3)); }); - String secondGenerationIndex = DataStream.getDefaultBackingIndexName(dataStreamName, 1L); + String secondGenerationIndex = getBackingIndices(dataStreamName).get(1); // check the 2nd generation index picked up the new setting values assertBusy(() -> { GetSettingsRequest getSettingsRequest = new GetSettingsRequest().indices(secondGenerationIndex).includeDefaults(true); @@ -661,6 +635,15 @@ public void
testDataLifecycleServiceConfiguresTheMergePolicy() throws Exception }); } + private static List<String> getBackingIndices(String dataStreamName) { + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) + .actionGet(); + assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); + assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getName(), equalTo(dataStreamName)); + return getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices().stream().map(Index::getName).toList(); + } + static void indexDocs(String dataStream, int numDocs) { BulkRequest bulkRequest = new BulkRequest(); for (int i = 0; i < numDocs; i++) { @@ -691,10 +674,10 @@ public void testReenableDataStreamLifecycle() throws Exception { client().execute(CreateDataStreamAction.INSTANCE, createDataStreamRequest).get(); indexDocs(dataStreamName, 10); - + List<String> backingIndices = getBackingIndices(dataStreamName); { // backing index should not be managed - String writeIndex = getDefaultBackingIndexName(dataStreamName, 1); + String writeIndex = backingIndices.get(0); ExplainDataStreamLifecycleAction.Response dataStreamLifecycleExplainResponse = client().execute( ExplainDataStreamLifecycleAction.INSTANCE, @@ -708,14 +691,8 @@ public void testReenableDataStreamLifecycle() throws Exception { { // data stream has only one backing index - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); - GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) - .actionGet(); - assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); - assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getName(), equalTo(dataStreamName)); - List<Index> backingIndices = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices(); assertThat(backingIndices.size(), equalTo(1)); - String writeIndex = getDefaultBackingIndexName(dataStreamName, 1); + String writeIndex = backingIndices.get(0); assertThat(writeIndex, backingIndexEqualTo(dataStreamName, 1)); } @@ -727,16 +704,11 @@ public void testReenableDataStreamLifecycle() throws Exception { ); assertBusy(() -> { - GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); - GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) - .actionGet(); - assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); - assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getName(), equalTo(dataStreamName)); - List<Index> backingIndices = getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices(); - assertThat(backingIndices.size(), equalTo(2)); - String backingIndex = backingIndices.get(0).getName(); + List<String> currentBackingIndices = getBackingIndices(dataStreamName); + assertThat(currentBackingIndices.size(), equalTo(2)); + String backingIndex = currentBackingIndices.get(0); assertThat(backingIndex, backingIndexEqualTo(dataStreamName, 1)); - String writeIndex = backingIndices.get(1).getName(); + String writeIndex = currentBackingIndices.get(1); assertThat(writeIndex, backingIndexEqualTo(dataStreamName, 2)); }); } diff --git
a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/TsdbDataStreamRestIT.java b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/TsdbDataStreamRestIT.java index 5ed3bdfbd1d4e..2a4b6f0c5a5ee 100644 --- a/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/TsdbDataStreamRestIT.java +++ b/modules/data-streams/src/javaRestTest/java/org/elasticsearch/datastreams/TsdbDataStreamRestIT.java @@ -10,7 +10,9 @@ import org.elasticsearch.client.Request; import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.DateFormatters; import org.elasticsearch.common.time.FormatNames; +import org.elasticsearch.index.mapper.DateFieldMapper; import org.elasticsearch.test.rest.ObjectPath; import org.junit.Before; @@ -223,6 +225,83 @@ public void testTsdbDataStreamsNanos() throws Exception { assertTsdbDataStream(); } + public void testTsdbDataStreamComponentTemplateWithAllSettingsAndMappings() throws Exception { + // Different component and index template. All settings and mappings are in the component template. + final String COMPONENT_TEMPLATE_WITH_SETTINGS_AND_MAPPINGS = """ + { + "template": { + "settings":{ + "index": { + "mode": "time_series", + "routing_path": ["metricset", "k8s.pod.uid"] + } + }, + "mappings":{ + "properties": { + "@timestamp" : { + "type": "date" + }, + "metricset": { + "type": "keyword", + "time_series_dimension": true + }, + "k8s": { + "properties": { + "pod": { + "properties": { + "uid": { + "type": "keyword", + "time_series_dimension": true + }, + "name": { + "type": "keyword" + }, + "ip": { + "type": "ip" + }, + "network": { + "properties": { + "tx": { + "type": "long" + }, + "rx": { + "type": "long" + } + } + } + } + } + } + } + } + } + } + } + """; + final String DELEGATE_TEMPLATE = """ + { + "index_patterns": ["k8s*"], + "composed_of": ["custom_template"], + "data_stream": { + } + }"""; + + // Delete and re-add the templates: + var deleteRequest = new Request("DELETE", "/_index_template/1"); + assertOK(client().performRequest(deleteRequest)); + deleteRequest = new Request("DELETE", "/_component_template/custom_template"); + assertOK(client().performRequest(deleteRequest)); + var request = new Request("POST", "/_component_template/custom_template"); + request.setJsonEntity(COMPONENT_TEMPLATE_WITH_SETTINGS_AND_MAPPINGS); + assertOK(client().performRequest(request)); + request = new Request("POST", "/_index_template/1"); + request.setJsonEntity(DELEGATE_TEMPLATE); + assertOK(client().performRequest(request)); + + // Ensure everything behaves the same, regardless of the fact that all settings and mappings are in the component template: + assertTsdbDataStream(); + } + private void assertTsdbDataStream() throws IOException { var bulkRequest = new Request("POST", "/k8s/_bulk"); bulkRequest.setJsonEntity(BULK.replace("$now", formatInstantNanos(Instant.now()))); @@ -541,6 +620,69 @@ public void testUpdateComponentTemplateDoesNotFailIndexTemplateValidation() thro client().performRequest(request); } + public void testLookBackTime() throws IOException { + // Create template that uses index.look_back_time index setting: + String template = """ + { + "index_patterns": ["test*"], + "template": { + "settings":{ + "index": { + "look_back_time": "24h", + "number_of_replicas": 0, + "mode": "time_series" + } + }, + "mappings":{ + "properties": { + "@timestamp" : { + "type": "date" + }, + "field": { + "type": "keyword", + "time_series_dimension": true + } + } + }
+ }, + "data_stream": {} + }"""; + var putIndexTemplateRequest = new Request("PUT", "/_index_template/2"); + putIndexTemplateRequest.setJsonEntity(template); + assertOK(client().performRequest(putIndexTemplateRequest)); + + // Create data stream: + var createDataStreamRequest = new Request("PUT", "/_data_stream/test123"); + assertOK(client().performRequest(createDataStreamRequest)); + + // Check data stream has been created: + var getDataStreamsRequest = new Request("GET", "/_data_stream"); + var response = client().performRequest(getDataStreamsRequest); + assertOK(response); + var dataStreams = entityAsMap(response); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams"), hasSize(1)); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.name"), equalTo("test123")); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.generation"), equalTo(1)); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.template"), equalTo("2")); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.indices"), hasSize(1)); + String firstBackingIndex = ObjectPath.evaluate(dataStreams, "data_streams.0.indices.0.index_name"); + assertThat(firstBackingIndex, backingIndexEqualTo("test123", 1)); + + // Check the backing index: + // 2023-08-15T04:35:50.000Z + var indices = getIndex(firstBackingIndex); + var escapedBackingIndex = firstBackingIndex.replace(".", "\\."); + assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".data_stream"), equalTo("test123")); + assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.mode"), equalTo("time_series")); + String startTimeFirstBackingIndex = ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.time_series.start_time"); + assertThat(startTimeFirstBackingIndex, notNullValue()); + Instant now = Instant.now(); + Instant startTime = DateFormatters.from(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parse(startTimeFirstBackingIndex)).toInstant(); + assertTrue(now.minus(24, ChronoUnit.HOURS).isAfter(startTime)); + String endTimeFirstBackingIndex = ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.time_series.end_time"); + assertThat(endTimeFirstBackingIndex, notNullValue()); + } + private static Map getIndex(String indexName) throws IOException { var getIndexRequest = new Request("GET", "/" + indexName + "?human"); var response = client().performRequest(getIndexRequest); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java index f4e660c2b18f4..064030ed2b6d5 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProvider.java @@ -84,10 +84,11 @@ public Settings getAdditionalIndexSettings( if (indexMode == IndexMode.TIME_SERIES) { Settings.Builder builder = Settings.builder(); TimeValue lookAheadTime = DataStreamsPlugin.LOOK_AHEAD_TIME.get(allSettings); + TimeValue lookBackTime = DataStreamsPlugin.LOOK_BACK_TIME.get(allSettings); final Instant start; final Instant end; if (dataStream == null || migrating) { - start = DataStream.getCanonicalTimestampBound(resolvedAt.minusMillis(lookAheadTime.getMillis())); + start = DataStream.getCanonicalTimestampBound(resolvedAt.minusMillis(lookBackTime.getMillis())); end = 
DataStream.getCanonicalTimestampBound(resolvedAt.plusMillis(lookAheadTime.getMillis())); } else { IndexMetadata currentLatestBackingIndex = metadata.index(dataStream.getWriteIndex()); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java index 3f8b7e40eeb43..cd221ada7a4dc 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/DataStreamsPlugin.java @@ -105,7 +105,14 @@ public class DataStreamsPlugin extends Plugin implements ActionPlugin { Setting.Property.Dynamic ); public static final String LIFECYCLE_CUSTOM_INDEX_METADATA_KEY = "data_stream_lifecycle"; - + public static final Setting<TimeValue> LOOK_BACK_TIME = Setting.timeSetting( + "index.look_back_time", + TimeValue.timeValueHours(2), + TimeValue.timeValueMinutes(1), + TimeValue.timeValueDays(7), + Setting.Property.IndexScope, + Setting.Property.Dynamic + ); // The dependency of index.look_ahead_time is a cluster setting and currently there is no clean validation approach for this: private final SetOnce<UpdateTimeSeriesRangeService> updateTimeSeriesRangeService = new SetOnce<>(); private final SetOnce<DataStreamLifecycleErrorStore> errorStoreInitialisationService = new SetOnce<>(); @@ -141,6 +148,7 @@ public List<Setting<?>> getSettings() { List<Setting<?>> pluginSettings = new ArrayList<>(); pluginSettings.add(TIME_SERIES_POLL_INTERVAL); pluginSettings.add(LOOK_AHEAD_TIME); + pluginSettings.add(LOOK_BACK_TIME); pluginSettings.add(DataStreamLifecycleService.DATA_STREAM_LIFECYCLE_POLL_INTERVAL_SETTING); pluginSettings.add(DataStreamLifecycleService.DATA_STREAM_MERGE_POLICY_TARGET_FLOOR_SEGMENT_SETTING); pluginSettings.add(DataStreamLifecycleService.DATA_STREAM_MERGE_POLICY_TARGET_FACTOR_SETTING); diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java index d1a2237b56a8b..f973eb95b39ce 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/UpdateTimeSeriesRangeService.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.Priority; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexMode; @@ -156,7 +157,7 @@ void scheduleTask() { job = threadPool.scheduleWithFixedDelay( () -> perform(() -> LOGGER.debug("completed tsdb update task")), pollInterval, - ThreadPool.Names.SAME + EsExecutors.DIRECT_EXECUTOR_SERVICE ); } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStore.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStore.java index e6b4e22d3acf1..21ef1811fe3a3 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStore.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStore.java @@ -31,14 +31,17 @@ public class DataStreamLifecycleErrorStore { /** * Records a string representation of the provided exception for the provided
index. * If an error was already recorded for the provided index this will override that error. + * + * Returns the previously recorded error for the provided index, or null otherwise. */ - public void recordError(String indexName, Exception e) { + @Nullable + public String recordError(String indexName, Exception e) { String exceptionToString = Strings.toString(((builder, params) -> { ElasticsearchException.generateThrowableXContent(builder, EMPTY_PARAMS, e); return builder; })); String recordedError = Strings.substring(exceptionToString, 0, MAX_ERROR_MESSAGE_LENGTH); - indexNameToError.put(indexName, recordedError); + return indexNameToError.put(indexName, recordedError); } /** diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java index b96fe5fd5b05f..3311d064b4816 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleService.java @@ -83,7 +83,6 @@ import static org.elasticsearch.cluster.metadata.IndexMetadata.DownsampleTaskStatus.SUCCESS; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_DOWNSAMPLE_STATUS; import static org.elasticsearch.datastreams.DataStreamsPlugin.LIFECYCLE_CUSTOM_INDEX_METADATA_KEY; -import static org.elasticsearch.datastreams.lifecycle.downsampling.ReplaceSourceWithDownsampleIndexTask.REPLACEMENT_SOURCE_INDEX; /** * This service will implement the needed actions (e.g. rollover, retention) to manage the data streams with a data stream lifecycle @@ -128,7 +127,7 @@ public class DataStreamLifecycleService implements ClusterStateListener, Closeab /* * This is the key for data stream lifecycle related custom index metadata. 
*/ - static final String FORCE_MERGE_COMPLETED_TIMESTAMP_METADATA_KEY = "force_merge_completed_timestamp"; + public static final String FORCE_MERGE_COMPLETED_TIMESTAMP_METADATA_KEY = "force_merge_completed_timestamp"; private final Settings settings; private final Client client; private final ClusterService clusterService; @@ -396,42 +395,24 @@ Set maybeExecuteDownsampling(ClusterState state, DataStream dataStream, L String indexName = index.getName(); IndexMetadata.DownsampleTaskStatus backingIndexDownsamplingStatus = INDEX_DOWNSAMPLE_STATUS.get(backingIndexMeta.getSettings()); - String backingIndexDownsamplingSource = IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.get(backingIndexMeta.getSettings()); + String downsamplingSourceIndex = IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.get(backingIndexMeta.getSettings()); // if the current index is not a downsample we want to mark the index as read-only before proceeding with downsampling - if (org.elasticsearch.common.Strings.hasText(backingIndexDownsamplingSource) == false + if (org.elasticsearch.common.Strings.hasText(downsamplingSourceIndex) == false && state.blocks().indexBlocked(ClusterBlockLevel.WRITE, indexName) == false) { affectedIndices.add(index); addIndexBlockOnce(indexName); - } else if (org.elasticsearch.common.Strings.hasText(backingIndexDownsamplingSource) + } else if (org.elasticsearch.common.Strings.hasText(downsamplingSourceIndex) && backingIndexDownsamplingStatus.equals(SUCCESS)) { // if the backing index is a downsample index itself, let's check if its source index still exists as we must delete it - Map lifecycleMetadata = backingIndexMeta.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY); - - // TODO document that we don't handle downsample indices that were added to the data stream manually (because we - // TODO currently can't reliably identify the source index to delete when multiple rounds of donwsampling are - // TODO involved unless DSL stores the needed metadata in the index metadata) - if (lifecycleMetadata != null && lifecycleMetadata.containsKey(REPLACEMENT_SOURCE_INDEX)) { - String actualDownsamplingSource = lifecycleMetadata.get(REPLACEMENT_SOURCE_INDEX); - IndexMetadata downsampleSourceIndex = metadata.index(actualDownsamplingSource); - if (downsampleSourceIndex != null) { - // we mark the backing index as affected as we don't want subsequent operations that might change its state to - // be performed, as we might lose the way to identify that we must delete its replacement source index - affectedIndices.add(index); - // delete downsampling source index (that's not part of the data stream anymore) before doing any more - // downsampling - deleteIndexOnce(backingIndexDownsamplingSource, "replacement with its downsampled index in the data stream"); - } - } else { - logger.trace( - "Data stream lifecycle encountered managed index [{}] as part of data stream [{}] which was " - + "downsampled from source [{} ]. This index was manually downsampled but data stream lifecycle service " - + "only supports downsampled indices through the data stream lifecycle. 
This index will be ignored from " - + "lifecycle donwsampling", - indexName, - dataStream, - backingIndexDownsamplingSource - ); + IndexMetadata downsampleSourceIndex = metadata.index(downsamplingSourceIndex); + if (downsampleSourceIndex != null) { + // we mark the backing index as affected as we don't want subsequent operations that might change its state to + // be performed, as we might lose the way to identify that we must delete its replacement source index + affectedIndices.add(index); + // delete downsampling source index (that's not part of the data stream anymore) before doing any more + // downsampling + deleteIndexOnce(downsamplingSourceIndex, "replacement with its downsampled index in the data stream"); } } @@ -509,7 +490,15 @@ private void downsampleIndexOnce(DataStreamLifecycle.Downsampling.Round round, S DownsampleAction.Request request = new DownsampleAction.Request(sourceIndex, downsampleIndexName, null, round.config()); transportActionsDeduplicator.executeOnce( request, - new ErrorRecordingActionListener(sourceIndex, errorStore), + new ErrorRecordingActionListener( + sourceIndex, + errorStore, + Strings.format( + "Data stream lifecycle encountered an error trying to downsample index [%s]. Data stream lifecycle will " + + "attempt to downsample the index on its next run.", + sourceIndex + ) + ), (req, reqListener) -> downsampleIndex(request, reqListener) ); } @@ -594,7 +583,16 @@ private Set evaluateDownsampleStatus( private void replaceBackingIndexWithDownsampleIndexOnce(DataStream dataStream, String backingIndexName, String downsampleIndexName) { clusterStateChangesDeduplicator.executeOnce( new ReplaceSourceWithDownsampleIndexTask(dataStream.getName(), backingIndexName, downsampleIndexName, null), - new ErrorRecordingActionListener(backingIndexName, errorStore), + new ErrorRecordingActionListener( + backingIndexName, + errorStore, + Strings.format( + "Data stream lifecycle encountered an error trying to replace index [%s] with index [%s] in data stream [%s]", + backingIndexName, + downsampleIndexName, + dataStream + ) + ), (req, reqListener) -> { logger.trace( "Data stream lifecycle issues request to replace index [{}] with index [{}] in data stream [{}]", @@ -618,7 +616,11 @@ private void deleteIndexOnce(String indexName, String reason) { DeleteIndexRequest deleteIndexRequest = new DeleteIndexRequest(indexName).masterNodeTimeout(TimeValue.MAX_VALUE); transportActionsDeduplicator.executeOnce( deleteIndexRequest, - new ErrorRecordingActionListener(indexName, errorStore), + new ErrorRecordingActionListener( + indexName, + errorStore, + Strings.format("Data stream lifecycle encountered an error trying to delete index [%s]", indexName) + ), (req, reqListener) -> deleteIndex(deleteIndexRequest, reason, reqListener) ); } @@ -630,7 +632,11 @@ private void addIndexBlockOnce(String indexName) { AddIndexBlockRequest addIndexBlockRequest = new AddIndexBlockRequest(WRITE, indexName).masterNodeTimeout(TimeValue.MAX_VALUE); transportActionsDeduplicator.executeOnce( addIndexBlockRequest, - new ErrorRecordingActionListener(indexName, errorStore), + new ErrorRecordingActionListener( + indexName, + errorStore, + Strings.format("Data stream lifecycle service encountered an error trying to mark index [%s] as readonly", indexName) + ), (req, reqListener) -> addIndexBlock(addIndexBlockRequest, reqListener) ); } @@ -678,7 +684,11 @@ private void maybeExecuteRollover(ClusterState state, DataStream dataStream) { ); transportActionsDeduplicator.executeOnce( rolloverRequest, - new 
ErrorRecordingActionListener(writeIndex.getName(), errorStore), + new ErrorRecordingActionListener( + writeIndex.getName(), + errorStore, + Strings.format("Data stream lifecycle encountered an error trying to rollover data stream [%s]", dataStream.getName()) + ), (req, reqListener) -> rolloverDataStream(writeIndex.getName(), rolloverRequest, reqListener) ); } @@ -746,7 +756,15 @@ private Set<Index> maybeExecuteForceMerge(ClusterState state, List<Index> indice affectedIndices.add(index); transportActionsDeduplicator.executeOnce( updateMergePolicySettingsRequest, - new ErrorRecordingActionListener(indexName, errorStore), + new ErrorRecordingActionListener( + indexName, + errorStore, + Strings.format( + "Data stream lifecycle encountered an error trying to update settings [%s] for index [%s]", + updateMergePolicySettingsRequest.settings().keySet(), + indexName + ) + ), (req, reqListener) -> updateIndexSetting(updateMergePolicySettingsRequest, reqListener) ); } else { @@ -755,7 +773,15 @@ private Set<Index> maybeExecuteForceMerge(ClusterState state, List<Index> indice // time to force merge the index transportActionsDeduplicator.executeOnce( new ForceMergeRequestWrapper(forceMergeRequest), - new ErrorRecordingActionListener(indexName, errorStore), + new ErrorRecordingActionListener( + indexName, + errorStore, + Strings.format( + "Data stream lifecycle encountered an error trying to force merge index [%s]. Data stream lifecycle will " + + "attempt to force merge the index on its next run.", + indexName + ) + ), (req, reqListener) -> forceMergeIndex(forceMergeRequest, reqListener) ); } @@ -791,10 +817,6 @@ public void onResponse(RolloverResponse rolloverResponse) { @Override public void onFailure(Exception e) { - logger.error( - () -> Strings.format("Data stream lifecycle encountered an error trying to rollover data steam [%s]", rolloverTarget), - e - ); DataStream dataStream = clusterService.state().metadata().dataStreams().get(rolloverTarget); if (dataStream == null || dataStream.getWriteIndex().getName().equals(writeIndexName) == false) { // the data stream has another write index so no point in recording an error for the previous write index we were @@ -880,10 +902,6 @@ public void onResponse(AddIndexBlockResponse addIndexBlockResponse) { + "read-only block to index [{}], but the response didn't contain an explicit result for the index.", targetIndex ); - logger.error( - "Data stream lifecycle service request to mark index [{}] as readonly was not acknowledged", - targetIndex - ); listener.onFailure( new ElasticsearchException("request to mark index [" + targetIndex + "] as read-only was not acknowledged") ); @@ -911,10 +929,6 @@ public void onResponse(AddIndexBlockResponse addIndexBlockResponse) { listener.onFailure(new ElasticsearchException(errorMessage)); } } else { - logger.error( - "Data stream lifecycle service request to mark index [{}] as readonly was not acknowledged", - targetIndex - ); listener.onFailure( new ElasticsearchException("request to mark index [" + targetIndex + "] as read-only was not acknowledged") ); @@ -960,6 +974,7 @@ public void onResponse(AcknowledgedResponse acknowledgedResponse) { @Override public void onFailure(Exception e) { if (e instanceof IndexNotFoundException) { + logger.trace("Data stream lifecycle did not delete index [{}] as it was already deleted", targetIndex); // index was already deleted, treat this as a success errorStore.clearRecordedError(targetIndex); listener.onResponse(null); @@ -972,11 +987,6 @@ public void onFailure(Exception e) { + "the next data stream lifecycle
run", targetIndex ); - } else { - logger.error( - () -> Strings.format("Data stream lifecycle encountered an error trying to delete index [%s]", targetIndex), - e - ); } listener.onFailure(e); } @@ -997,20 +1007,7 @@ public void onResponse(AcknowledgedResponse acknowledgedResponse) { @Override public void onFailure(Exception e) { - String previousError = errorStore.getError(sourceIndex); - listener.onFailure(e); - // To avoid spamming our logs, we only want to log the error once. - if (previousError == null || previousError.equals(errorStore.getError(sourceIndex)) == false) { - logger.error( - () -> Strings.format( - "Data stream lifecycle encountered an error trying to downsample index [%s]. Data stream lifecycle will " - + "attempt to downsample the index on its next run.", - sourceIndex - ), - e - ); - } } }); } @@ -1054,23 +1051,7 @@ public void onResponse(ForceMergeResponse forceMergeResponse) { @Override public void onFailure(Exception e) { - String previousError = errorStore.getError(targetIndex); - /* - * Note that this call to onFailure has to happen before the logging because we choose whether to log or not based on a - * side effect of the onFailure call (it updates the error in the errorStore). - */ listener.onFailure(e); - // To avoid spamming our logs, we only want to log the error once. - if (previousError == null || previousError.equals(errorStore.getError(targetIndex)) == false) { - logger.warn( - () -> Strings.format( - "Data stream lifecycle encountered an error trying to force merge index [%s]. Data stream lifecycle will " - + "attempt to force merge the index on its next run.", - targetIndex - ), - e - ); - } } }); } @@ -1113,10 +1094,12 @@ static TimeValue getRetentionConfiguration(DataStream dataStream) { static class ErrorRecordingActionListener implements ActionListener { private final String targetIndex; private final DataStreamLifecycleErrorStore errorStore; + private final String errorLogMessage; - ErrorRecordingActionListener(String targetIndex, DataStreamLifecycleErrorStore errorStore) { + ErrorRecordingActionListener(String targetIndex, DataStreamLifecycleErrorStore errorStore, String errorLogMessage) { this.targetIndex = targetIndex; this.errorStore = errorStore; + this.errorLogMessage = errorLogMessage; } @Override @@ -1126,7 +1109,21 @@ public void onResponse(Void unused) { @Override public void onFailure(Exception e) { - errorStore.recordError(targetIndex, e); + recordAndLogError(targetIndex, errorStore, e, errorLogMessage); + } + } + + /** + * Records the provided error for the index in the error store and logs the error message at `ERROR` level if the error for the index + * is different to what's already in the error store. + * This allows us to not spam the logs and only log new errors when we're about to record them in the store. 
+ */ + static void recordAndLogError(String targetIndex, DataStreamLifecycleErrorStore errorStore, Exception e, String logMessage) { + String previousError = errorStore.recordError(targetIndex, e); + if (previousError == null || previousError.equals(errorStore.getError(targetIndex)) == false) { + logger.error(logMessage, e); + } else { + logger.trace(logMessage, e); } } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/ExplainDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/ExplainDataStreamLifecycleAction.java index 273f846775f14..8f0df2b130517 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/ExplainDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/ExplainDataStreamLifecycleAction.java @@ -151,7 +151,7 @@ public Response(List indices, @Nullable Rollove public Response(StreamInput in) throws IOException { super(in); - this.indices = in.readList(ExplainIndexDataStreamLifecycle::new); + this.indices = in.readCollectionAsList(ExplainIndexDataStreamLifecycle::new); this.rolloverConfiguration = in.readOptionalWriteable(RolloverConfiguration::new); } @@ -165,7 +165,7 @@ public RolloverConfiguration getRolloverConfiguration() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(indices); + out.writeCollection(indices); out.writeOptionalWriteable(rolloverConfiguration); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleAction.java index 78100f6c2bec3..28e0e0ceb0915 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/GetDataStreamLifecycleAction.java @@ -191,7 +191,7 @@ public Response(List dataStreamLifecycles, @Nullable Rollov } public Response(StreamInput in) throws IOException { - this(in.readList(Response.DataStreamLifecycle::new), in.readOptionalWriteable(RolloverConfiguration::new)); + this(in.readCollectionAsList(Response.DataStreamLifecycle::new), in.readOptionalWriteable(RolloverConfiguration::new)); } public List getDataStreamLifecycles() { @@ -205,7 +205,7 @@ public RolloverConfiguration getRolloverConfiguration() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(dataStreamLifecycles); + out.writeCollection(dataStreamLifecycles); out.writeOptionalWriteable(rolloverConfiguration); } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamLifecycleAction.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamLifecycleAction.java index a5f3a58528e48..641b89a513d40 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamLifecycleAction.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/action/PutDataStreamLifecycleAction.java @@ -15,10 +15,12 @@ import org.elasticsearch.action.support.master.AcknowledgedRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.metadata.DataStreamLifecycle; +import 
org.elasticsearch.cluster.metadata.DataStreamLifecycle.Downsampling; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.xcontent.AbstractObjectParser; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ToXContentObject; @@ -30,6 +32,7 @@ import java.util.Objects; import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.DATA_RETENTION_FIELD; +import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.DOWNSAMPLING_FIELD; import static org.elasticsearch.cluster.metadata.DataStreamLifecycle.ENABLED_FIELD; /** @@ -48,7 +51,7 @@ public static final class Request extends AcknowledgedRequest implement public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "put_data_stream_lifecycle_request", - args -> new Request(null, ((TimeValue) args[0]), (Boolean) args[1]) + args -> new Request(null, ((TimeValue) args[0]), (Boolean) args[1], (Downsampling) args[2]) ); static { @@ -59,6 +62,13 @@ public static final class Request extends AcknowledgedRequest implement ObjectParser.ValueType.STRING_OR_NULL ); PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), ENABLED_FIELD); + PARSER.declareField(ConstructingObjectParser.optionalConstructorArg(), (p, c) -> { + if (p.currentToken() == XContentParser.Token.VALUE_NULL) { + return Downsampling.NULL; + } else { + return new Downsampling(AbstractObjectParser.parseArray(p, c, Downsampling.Round::fromXContent)); + } + }, DOWNSAMPLING_FIELD, ObjectParser.ValueType.OBJECT_ARRAY_OR_NULL); } public static Request parseRequest(XContentParser parser) { @@ -85,7 +95,7 @@ public void writeTo(StreamOutput out) throws IOException { } public Request(String[] names, @Nullable TimeValue dataRetention) { - this(names, dataRetention, null); + this(names, dataRetention, null, null); } public Request(String[] names, DataStreamLifecycle lifecycle) { @@ -94,8 +104,16 @@ public Request(String[] names, DataStreamLifecycle lifecycle) { } public Request(String[] names, @Nullable TimeValue dataRetention, @Nullable Boolean enabled) { + this(names, dataRetention, enabled, null); + } + + public Request(String[] names, @Nullable TimeValue dataRetention, @Nullable Boolean enabled, @Nullable Downsampling downsampling) { this.names = names; - this.lifecycle = DataStreamLifecycle.newBuilder().dataRetention(dataRetention).enabled(enabled == null || enabled).build(); + this.lifecycle = DataStreamLifecycle.newBuilder() + .dataRetention(dataRetention) + .enabled(enabled == null || enabled) + .downsampling(downsampling) + .build(); } public String[] getNames() { diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceBackingWithDownsampleIndexExecutor.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceBackingWithDownsampleIndexExecutor.java index a43fc58c3cf50..625c1f71a92db 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceBackingWithDownsampleIndexExecutor.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceBackingWithDownsampleIndexExecutor.java @@ -16,7 +16,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import 
org.elasticsearch.cluster.SimpleBatchedExecutor; -import org.elasticsearch.core.Strings; +import org.elasticsearch.common.Strings; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.snapshots.SnapshotInProgressException; @@ -53,6 +53,11 @@ public void taskSucceeded(ReplaceSourceWithDownsampleIndexTask task, Void unused ); task.getListener().onResponse(null); + LOGGER.trace( + "Issuing request to delete index [{}] as it's not part of data stream [{}] anymore", + task.getSourceBackingIndex(), + task.getDataStreamName() + ); // chain an optimistic delete of the source index call here (if it fails it'll be retried by the data stream lifecycle loop) client.admin().indices().delete(new DeleteIndexRequest(task.getSourceBackingIndex()), new ActionListener<>() { @Override @@ -78,6 +83,7 @@ public void onResponse(AcknowledgedResponse acknowledgedResponse) { public void onFailure(Exception e) { if (e instanceof IndexNotFoundException) { // index was already deleted, treat this as a success + LOGGER.trace("Did not delete index [{}] as it was already deleted", task.getSourceBackingIndex()); return; } diff --git a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTask.java b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTask.java index ba9e79962e323..70cf57456e099 100644 --- a/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTask.java +++ b/modules/data-streams/src/main/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTask.java @@ -21,7 +21,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexSettings; -import java.util.HashMap; import java.util.Locale; import java.util.Map; import java.util.Objects; @@ -35,7 +34,6 @@ */ public class ReplaceSourceWithDownsampleIndexTask implements ClusterStateTaskListener { private static final Logger LOGGER = LogManager.getLogger(ReplaceSourceWithDownsampleIndexTask.class); - public static final String REPLACEMENT_SOURCE_INDEX = "replacement_source_index"; private ActionListener listener; private final String dataStreamName; private final String sourceBackingIndex; @@ -166,13 +164,11 @@ private static IndexMetadata copyDataStreamLifecycleState( ) { IndexMetadata.Builder downsampleIndexBuilder = IndexMetadata.builder(dest); Map lifecycleCustomMetadata = source.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY); - Map newCustomMetadata = new HashMap<>(); if (lifecycleCustomMetadata != null) { - newCustomMetadata.putAll(lifecycleCustomMetadata); + // this will, for now, ensure that DSL tail merging is skipped for the downsample index (and it should be as the downsample + // transport action forcemerged the downsample index to 1 segment) + downsampleIndexBuilder.putCustom(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY, lifecycleCustomMetadata); } - newCustomMetadata.put(REPLACEMENT_SOURCE_INDEX, source.getIndex().getName()); - downsampleIndexBuilder.putCustom(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY, newCustomMetadata); - if (IndexSettings.LIFECYCLE_ORIGINATION_DATE_SETTING.exists(dest.getSettings()) == false) { downsampleIndexBuilder.settings( Settings.builder() diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java 
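The executor above deletes the replaced backing index optimistically and treats IndexNotFoundException as success, since a concurrent delete reaches the same end state and any other failure is picked up again by the next data stream lifecycle run. A minimal sketch of that listener shape, using only the types already imported in the hunk (the class and method names here are illustrative):

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.support.master.AcknowledgedResponse;
    import org.elasticsearch.index.IndexNotFoundException;

    class OptimisticDeleteSketch {
        static ActionListener<AcknowledgedResponse> listener() {
            return new ActionListener<>() {
                @Override
                public void onResponse(AcknowledgedResponse acknowledgedResponse) {
                    // nothing further to do; an unacknowledged delete is retried by
                    // the periodic data stream lifecycle run
                }

                @Override
                public void onFailure(Exception e) {
                    if (e instanceof IndexNotFoundException) {
                        return; // already deleted: the end state we wanted anyway
                    }
                    // other failures are also non-fatal here; the lifecycle loop retries
                }
            };
        }
    }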
b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java index 27fe65ba309d3..23a86b657b82d 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/DataStreamIndexSettingsProviderTests.java @@ -38,6 +38,7 @@ public class DataStreamIndexSettingsProviderTests extends ESTestCase { + private static final TimeValue DEFAULT_LOOK_BACK_TIME = TimeValue.timeValueHours(2); // default private static final TimeValue DEFAULT_LOOK_AHEAD_TIME = TimeValue.timeValueHours(2); // default DataStreamIndexSettingsProvider provider; @@ -83,7 +84,7 @@ public void testGetAdditionalIndexSettings() throws Exception { List.of(new CompressedXContent(mapping)) ); assertThat(result.size(), equalTo(3)); - assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); assertThat(IndexMetadata.INDEX_ROUTING_PATH.get(result), contains("field3")); } @@ -235,10 +236,31 @@ public void testGetAdditionalIndexSettingsLookAheadTime() throws Exception { List.of(new CompressedXContent("{}")) ); assertThat(result.size(), equalTo(2)); - assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(lookAheadTime.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(lookAheadTime.getMillis()))); } + public void testGetAdditionalIndexSettingsLookBackTime() throws Exception { + Metadata metadata = Metadata.EMPTY_METADATA; + String dataStreamName = "logs-app1"; + + Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS); + TimeValue lookBackTime = TimeValue.timeValueHours(12); + Settings settings = builder().put("index.mode", "time_series").put("index.look_back_time", lookBackTime.getStringRep()).build(); + Settings result = provider.getAdditionalIndexSettings( + DataStream.getDefaultBackingIndexName(dataStreamName, 1), + dataStreamName, + true, + metadata, + now, + settings, + List.of(new CompressedXContent("{}")) + ); + assertThat(result.size(), equalTo(2)); + assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(lookBackTime.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); + } + public void testGetAdditionalIndexSettingsDataStreamAlreadyCreated() throws Exception { String dataStreamName = "logs-app1"; TimeValue lookAheadTime = TimeValue.timeValueHours(2); @@ -358,7 +380,7 @@ public void testGetAdditionalIndexSettingsMigrateToTsdb() { List.of() ); assertThat(result.size(), equalTo(2)); - assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); + assertThat(IndexSettings.TIME_SERIES_START_TIME.get(result), equalTo(now.minusMillis(DEFAULT_LOOK_BACK_TIME.getMillis()))); assertThat(IndexSettings.TIME_SERIES_END_TIME.get(result), equalTo(now.plusMillis(DEFAULT_LOOK_AHEAD_TIME.getMillis()))); } diff --git 
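These assertions pin down the window rule the settings provider now follows: index.time_series.start_time is now minus index.look_back_time (2h by default) and end_time is now plus index.look_ahead_time (also 2h by default), with each setting tunable independently. A self-contained sketch of just that arithmetic in plain java.time; the provider itself additionally resolves the settings and accounts for existing backing indices:

    import java.time.Duration;
    import java.time.Instant;
    import java.time.temporal.ChronoUnit;

    class TsdbWindowSketch {
        // start = now - look_back_time, end = now + look_ahead_time
        static Instant[] window(Instant now, Duration lookBack, Duration lookAhead) {
            return new Instant[] { now.minus(lookBack), now.plus(lookAhead) };
        }

        public static void main(String[] args) {
            Instant now = Instant.now().truncatedTo(ChronoUnit.SECONDS);
            Instant[] defaults = window(now, Duration.ofHours(2), Duration.ofHours(2));
            Instant[] longerBack = window(now, Duration.ofHours(12), Duration.ofHours(2));
            System.out.println("defaults: " + defaults[0] + " .. " + defaults[1]);
            System.out.println("look_back_time=12h: " + longerBack[0] + " .. " + longerBack[1]);
        }
    }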
a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/LookAHeadTimeTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/LookAHeadTimeTests.java index a0ed1a83d0de1..a612587262463 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/LookAHeadTimeTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/LookAHeadTimeTests.java @@ -16,7 +16,6 @@ import org.elasticsearch.test.ESSingleNodeTestCase; import org.junit.After; -import java.io.IOException; import java.util.Collection; import java.util.List; @@ -52,7 +51,7 @@ public void testTimeSeriesPollIntervalSettingToHigh() { assertThat(e.getMessage(), equalTo("failed to parse value [11m] for setting [time_series.poll_interval], must be <= [10m]")); } - public void testLookAheadTimeSetting() throws IOException { + public void testLookAheadTimeSetting() { var settings = Settings.builder().put(DataStreamsPlugin.LOOK_AHEAD_TIME.getKey(), "10m").build(); updateIndexSettings(settings); } @@ -69,6 +68,18 @@ public void testLookAheadTimeSettingToHigh() { assertThat(e.getMessage(), equalTo("failed to parse value [8d] for setting [index.look_ahead_time], must be <= [7d]")); } + public void testLookBackTimeSettingToLow() { + var settings = Settings.builder().put(DataStreamsPlugin.LOOK_BACK_TIME.getKey(), "1s").build(); + var e = expectThrows(IllegalArgumentException.class, () -> updateIndexSettings(settings)); + assertThat(e.getMessage(), equalTo("failed to parse value [1s] for setting [index.look_back_time], must be >= [1m]")); + } + + public void testLookBackTimeSettingToHigh() { + var settings = Settings.builder().put(DataStreamsPlugin.LOOK_BACK_TIME.getKey(), "8d").build(); + var e = expectThrows(IllegalArgumentException.class, () -> updateIndexSettings(settings)); + assertThat(e.getMessage(), equalTo("failed to parse value [8d] for setting [index.look_back_time], must be <= [7d]")); + } + public void testLookAheadTimeSettingLowerThanTimeSeriesPollIntervalSetting() { { var settings = Settings.builder() @@ -99,7 +110,7 @@ public void testLookAheadTimeSettingLowerThanTimeSeriesPollIntervalSetting() { } } - public void testLookAheadTimeSettingHigherThanTimeSeriesPollIntervalSetting() throws IOException { + public void testLookAheadTimeSettingHigherThanTimeSeriesPollIntervalSetting() { var clusterSettings = Settings.builder().put(DataStreamsPlugin.TIME_SERIES_POLL_INTERVAL.getKey(), "10m").build(); updateClusterSettings(clusterSettings); var indexSettings = Settings.builder().put(DataStreamsPlugin.LOOK_AHEAD_TIME.getKey(), "100m").build(); @@ -110,7 +121,7 @@ private void updateClusterSettings(Settings settings) { clusterAdmin().updateSettings(new ClusterUpdateSettingsRequest().persistentSettings(settings)).actionGet(); } - private void updateIndexSettings(Settings settings) throws IOException { + private void updateIndexSettings(Settings settings) { try { createIndex("test"); } catch (ResourceAlreadyExistsException e) { diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java index 1107ecdf5a071..c0cb1e5452c3d 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/MetadataDataStreamRolloverServiceTests.java @@ -24,6 +24,7 @@ import 
org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.DateUtils; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexMode; import org.elasticsearch.index.IndexSettings; @@ -36,12 +37,14 @@ import java.time.Instant; import java.time.temporal.ChronoUnit; +import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Set; import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_INDEX_UUID; import static org.elasticsearch.datastreams.DataStreamIndexSettingsProvider.FORMATTER; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasItem; @@ -338,6 +341,164 @@ public void testChangingIndexModeFromTimeSeriesToSomethingElseNoEffectOnExisting } } + public void testRolloverClusterStateWithBrokenOlderTsdbDataStream() throws Exception { + Instant now = Instant.now(); + String dataStreamName = "metrics-my-app"; + int numberOfBackingIndices = randomIntBetween(1, 3); + ClusterState clusterState = createClusterState(dataStreamName, numberOfBackingIndices, now, true); + DataStream dataStream = clusterState.metadata().dataStreams().get(dataStreamName); + + ThreadPool testThreadPool = new TestThreadPool(getTestName()); + try { + MetadataRolloverService rolloverService = DataStreamTestHelper.getMetadataRolloverService( + dataStream, + testThreadPool, + Set.of(createSettingsProvider(xContentRegistry())), + xContentRegistry() + ); + MaxDocsCondition condition = new MaxDocsCondition(randomNonNegativeLong()); + List> metConditions = Collections.singletonList(condition); + CreateIndexRequest createIndexRequest = new CreateIndexRequest("_na_"); + IndexMetadataStats indexStats = new IndexMetadataStats(IndexWriteLoad.builder(1).build(), 10, 10); + + long before = testThreadPool.absoluteTimeInMillis(); + MetadataRolloverService.RolloverResult rolloverResult = rolloverService.rolloverClusterState( + clusterState, + dataStream.getName(), + null, + createIndexRequest, + metConditions, + now, + randomBoolean(), + false, + indexStats + ); + long after = testThreadPool.absoluteTimeInMillis(); + + String sourceIndexName = DataStream.getDefaultBackingIndexName(dataStream.getName(), dataStream.getGeneration()); + String newIndexName = DataStream.getDefaultBackingIndexName(dataStream.getName(), dataStream.getGeneration() + 1); + assertEquals(sourceIndexName, rolloverResult.sourceIndexName()); + assertEquals(newIndexName, rolloverResult.rolloverIndexName()); + Metadata rolloverMetadata = rolloverResult.clusterState().metadata(); + assertEquals(dataStream.getIndices().size() + 1, rolloverMetadata.indices().size()); + IndexMetadata rolloverIndexMetadata = rolloverMetadata.index(newIndexName); + + IndexAbstraction ds = rolloverMetadata.getIndicesLookup().get(dataStream.getName()); + assertThat(ds.getType(), equalTo(IndexAbstraction.Type.DATA_STREAM)); + assertThat(ds.getIndices(), hasSize(dataStream.getIndices().size() + 1)); + assertThat(ds.getIndices(), hasItem(rolloverMetadata.index(sourceIndexName).getIndex())); + assertThat(ds.getIndices(), hasItem(rolloverIndexMetadata.getIndex())); + assertThat(ds.getWriteIndex(), equalTo(rolloverIndexMetadata.getIndex())); + assertThat(((DataStream) ds).getIndexMode(), equalTo(IndexMode.TIME_SERIES)); + + RolloverInfo info = 
rolloverMetadata.index(sourceIndexName).getRolloverInfos().get(dataStream.getName()); + assertThat(info.getTime(), lessThanOrEqualTo(after)); + assertThat(info.getTime(), greaterThanOrEqualTo(before)); + assertThat(info.getMetConditions(), hasSize(1)); + assertThat(info.getMetConditions().get(0).value(), equalTo(condition.value())); + + for (int i = 0; i < numberOfBackingIndices; i++) { + var im = rolloverMetadata.index(rolloverMetadata.dataStreams().get(dataStreamName).getIndices().get(i)); + var startTime1 = IndexSettings.TIME_SERIES_START_TIME.get(im.getSettings()); + var endTime1 = IndexSettings.TIME_SERIES_END_TIME.get(im.getSettings()); + assertThat(startTime1.toEpochMilli(), equalTo(DateUtils.MAX_MILLIS_BEFORE_MINUS_9999)); + assertThat(endTime1.toEpochMilli(), equalTo(DateUtils.MAX_MILLIS_BEFORE_9999)); + assertThat(im.getIndexMode(), equalTo(null)); + } + { + var im = rolloverMetadata.index( + rolloverMetadata.dataStreams().get(dataStreamName).getIndices().get(numberOfBackingIndices) + ); + var lastStartTime = IndexSettings.TIME_SERIES_START_TIME.get(im.getSettings()); + var lastEndTime = IndexSettings.TIME_SERIES_END_TIME.get(im.getSettings()); + assertThat(lastStartTime, equalTo(now.minus(2, ChronoUnit.HOURS).truncatedTo(ChronoUnit.SECONDS))); + assertThat(lastEndTime, equalTo(now.plus(2, ChronoUnit.HOURS).truncatedTo(ChronoUnit.SECONDS))); + assertThat(im.getIndexMode(), equalTo(IndexMode.TIME_SERIES)); + } + } finally { + testThreadPool.shutdown(); + } + } + + public void testRolloverClusterStateWithBrokenTsdbDataStream() throws Exception { + Instant now = Instant.now(); + String dataStreamName = "metrics-my-app"; + int numberOfBackingIndices = randomIntBetween(1, 3); + ClusterState clusterState = createClusterState(dataStreamName, numberOfBackingIndices, now, false); + DataStream dataStream = clusterState.metadata().dataStreams().get(dataStreamName); + + ThreadPool testThreadPool = new TestThreadPool(getTestName()); + try { + MetadataRolloverService rolloverService = DataStreamTestHelper.getMetadataRolloverService( + dataStream, + testThreadPool, + Set.of(createSettingsProvider(xContentRegistry())), + xContentRegistry() + ); + MaxDocsCondition condition = new MaxDocsCondition(randomNonNegativeLong()); + List> metConditions = Collections.singletonList(condition); + CreateIndexRequest createIndexRequest = new CreateIndexRequest("_na_"); + IndexMetadataStats indexStats = new IndexMetadataStats(IndexWriteLoad.builder(1).build(), 10, 10); + + Exception e = expectThrows( + IllegalArgumentException.class, + () -> rolloverService.rolloverClusterState( + clusterState, + dataStream.getName(), + null, + createIndexRequest, + metConditions, + now, + randomBoolean(), + false, + indexStats + ) + ); + assertThat(e.getMessage(), containsString("is overlapping with backing index")); + } finally { + testThreadPool.shutdown(); + } + } + + private static ClusterState createClusterState(String dataStreamName, int numberOfBackingIndices, Instant now, boolean includeVersion) { + List backingIndices = new ArrayList<>(numberOfBackingIndices); + for (int i = 1; i <= numberOfBackingIndices; i++) { + backingIndices.add(new Index(DataStream.getDefaultBackingIndexName(dataStreamName, i, now.toEpochMilli()), "uuid" + i)); + } + final DataStream dataStream = new DataStream( + dataStreamName, + backingIndices, + numberOfBackingIndices, + null, + false, + false, + false, + false, + null + ); + ComposableIndexTemplate template = new ComposableIndexTemplate.Builder().indexPatterns(List.of(dataStream.getName() +
"*")) + .template( + new Template(Settings.builder().put("index.mode", "time_series").put("index.routing_path", "uid").build(), null, null) + ) + .dataStreamTemplate(new ComposableIndexTemplate.DataStreamTemplate(false, false)) + .build(); + Metadata.Builder builder = Metadata.builder(); + builder.put("template", template); + + for (Index backingIndex : backingIndices) { + var settings = settings(IndexVersion.current()).put("index.hidden", true) + .put(SETTING_INDEX_UUID, backingIndex.getUUID()) + .put("index.mode", "time_series") + .put("index.routing_path", "uid"); + if (includeVersion) { + settings.put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.V_8_9_0); + } + builder.put(IndexMetadata.builder(backingIndex.getName()).settings(settings).numberOfShards(1).numberOfReplicas(0)); + } + builder.put(dataStream); + return ClusterState.builder(new ClusterName("test")).metadata(builder).build(); + } + static DataStreamIndexSettingsProvider createSettingsProvider(NamedXContentRegistry xContentRegistry) { return new DataStreamIndexSettingsProvider( im -> MapperTestUtils.newMapperService(xContentRegistry, createTempDir(), im.getSettings(), im.getIndex().getName()) diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStoreTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStoreTests.java index 92491b0ed7e34..ace52cd4c3f8b 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStoreTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleErrorStoreTests.java @@ -16,6 +16,7 @@ import static org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore.MAX_ERROR_MESSAGE_LENGTH; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -30,10 +31,15 @@ public void setupServices() { } public void testRecordAndRetrieveError() { - errorStore.recordError("test", new NullPointerException("testing")); + String existingRecordedError = errorStore.recordError("test", new NullPointerException("testing")); + assertThat(existingRecordedError, is(nullValue())); assertThat(errorStore.getError("test"), is(notNullValue())); assertThat(errorStore.getAllIndices().size(), is(1)); assertThat(errorStore.getAllIndices().get(0), is("test")); + + existingRecordedError = errorStore.recordError("test", new IllegalStateException("bad state")); + assertThat(existingRecordedError, is(notNullValue())); + assertThat(existingRecordedError, containsString("testing")); } public void testRetrieveAfterClear() { diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java index 3c827e37a1a03..3a5afd2042565 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.datastreams.lifecycle; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.Version; import 
org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; @@ -1017,6 +1016,7 @@ public void testDownsampling() throws Exception { Settings.builder() .put(firstGenMetadata.getSettings()) .put(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME_KEY, firstGenIndexName) + .put(IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_NAME_KEY, firstGenIndexName) .put(IndexMetadata.INDEX_DOWNSAMPLE_STATUS.getKey(), SUCCESS) ) .numberOfReplicas(0) @@ -1158,7 +1158,7 @@ private ClusterState createClusterState(String indexName, Map cu Settings indexSettings = Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), randomIntBetween(1, 10)) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), randomIntBetween(0, 3)) - .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()) .build(); IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexName).version(randomLong()).settings(indexSettings); if (customDataStreamLifecycleMetadata != null) { diff --git a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTaskTests.java b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTaskTests.java index f40abbd1f1573..c3d1262c72dce 100644 --- a/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTaskTests.java +++ b/modules/data-streams/src/test/java/org/elasticsearch/datastreams/lifecycle/downsampling/ReplaceSourceWithDownsampleIndexTaskTests.java @@ -24,10 +24,11 @@ import org.junit.Before; import java.util.Locale; +import java.util.Map; import static org.elasticsearch.datastreams.DataStreamsPlugin.LIFECYCLE_CUSTOM_INDEX_METADATA_KEY; import static org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleFixtures.createDataStream; -import static org.elasticsearch.datastreams.lifecycle.downsampling.ReplaceSourceWithDownsampleIndexTask.REPLACEMENT_SOURCE_INDEX; +import static org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService.FORCE_MERGE_COMPLETED_TIMESTAMP_METADATA_KEY; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; @@ -159,6 +160,12 @@ public void testSourceIsReplacedWithDownsampleAndOriginationDateIsConfigured() { builder.put(dataStream); ClusterState previousState = ClusterState.builder(ClusterName.DEFAULT).metadata(builder).build(); + // let's add some lifecycle custom metadata to the first generation index + IndexMetadata indexMetadata = previousState.metadata().index(firstGenIndex); + IndexMetadata.Builder firstGenBuilder = IndexMetadata.builder(indexMetadata) + .putCustom(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY, Map.of(FORCE_MERGE_COMPLETED_TIMESTAMP_METADATA_KEY, String.valueOf(now))); + Metadata.Builder metaBuilder = Metadata.builder(previousState.metadata()).put(firstGenBuilder); + previousState = ClusterState.builder(previousState).metadata(metaBuilder).build(); ClusterState newState = new ReplaceSourceWithDownsampleIndexTask(dataStreamName, firstGenIndex, downsampleIndex, null).execute( previousState ); @@ -181,14 +188,57 @@ public void testSourceIsReplacedWithDownsampleAndOriginationDateIsConfigured() { IndexMetadata downsampleMeta = newState.metadata().index(downsampleIndex); 
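The DataStreamLifecycleErrorStoreTests change above exercises the other half of the recordAndLogError contract from the top of this patch: recordError returns the message previously held for the index, which is what lets a repeat of the same failure be demoted from ERROR to TRACE. A minimal map-backed sketch of that contract; ErrorDedupSketch is a hypothetical stand-in for DataStreamLifecycleErrorStore, which additionally truncates long messages:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class ErrorDedupSketch {
        private final Map<String, String> store = new ConcurrentHashMap<>();

        /** Records the failure and returns the previously held message, or null. */
        String recordError(String index, Exception e) {
            return store.put(index, e.getMessage());
        }

        /** Mirrors recordAndLogError: log loudly only for new or changed failures. */
        boolean shouldLogAtErrorLevel(String index, Exception e) {
            String previous = recordError(index, e);
            return previous == null || previous.equals(store.get(index)) == false;
        }
    }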
assertThat(IndexSettings.LIFECYCLE_ORIGINATION_DATE_SETTING.get(downsampleMeta.getSettings()), is(rolloverInfo.getTime())); - // the donwsample index contains metadata to remember the index we downsampled from - assertThat(downsampleMeta.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY), is(notNullValue())); + assertThat(downsampleMeta.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY), notNullValue()); assertThat( - downsampleMeta.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY).get(REPLACEMENT_SOURCE_INDEX), - is(sourceIndexAbstraction.getName()) + downsampleMeta.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY).get(FORCE_MERGE_COMPLETED_TIMESTAMP_METADATA_KEY), + is(String.valueOf(now)) ); } + public void testSourceWithoutLifecycleMetaAndDestWithOriginationDateAlreadyConfigured() { + String dataStreamName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); + int numBackingIndices = 3; + Metadata.Builder builder = Metadata.builder(); + DataStream dataStream = createDataStream( + builder, + dataStreamName, + numBackingIndices, + settings(IndexVersion.current()), + DataStreamLifecycle.newBuilder().dataRetention(TimeValue.MAX_VALUE).build(), + now + ); + String firstGenIndex = DataStream.getDefaultBackingIndexName(dataStreamName, 1); + String downsampleIndex = "downsample-1s-" + firstGenIndex; + long downsampleOriginationDate = now - randomLongBetween(10_000, 12_000); + IndexMetadata.Builder downsampleIndexMeta = IndexMetadata.builder(downsampleIndex) + .settings( + settings(IndexVersion.current()).put(IndexSettings.LIFECYCLE_ORIGINATION_DATE_SETTING.getKey(), downsampleOriginationDate) + ) + .numberOfShards(1) + .numberOfReplicas(0); + builder.put(downsampleIndexMeta); + builder.put(dataStream); + ClusterState previousState = ClusterState.builder(ClusterName.DEFAULT).metadata(builder).build(); + + ClusterState newState = new ReplaceSourceWithDownsampleIndexTask(dataStreamName, firstGenIndex, downsampleIndex, null).execute( + previousState + ); + + IndexAbstraction downsampleIndexAbstraction = newState.metadata().getIndicesLookup().get(downsampleIndex); + assertThat(downsampleIndexAbstraction, is(notNullValue())); + assertThat(downsampleIndexAbstraction.getParentDataStream(), is(notNullValue())); + // the downsample index is part of the data stream + assertThat(downsampleIndexAbstraction.getParentDataStream().getName(), is(dataStreamName)); + + // the source index is NOT part of the data stream + IndexAbstraction sourceIndexAbstraction = newState.metadata().getIndicesLookup().get(firstGenIndex); + assertThat(sourceIndexAbstraction, is(notNullValue())); + assertThat(sourceIndexAbstraction.getParentDataStream(), is(nullValue())); + + IndexMetadata downsampleMeta = newState.metadata().index(downsampleIndex); + assertThat(IndexSettings.LIFECYCLE_ORIGINATION_DATE_SETTING.get(downsampleMeta.getSettings()), is(downsampleOriginationDate)); + } + public void testSourceIndexIsNotPartOfDSAnymore() { String dataStreamName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT); int numBackingIndices = 3; @@ -227,12 +277,6 @@ public void testSourceIndexIsNotPartOfDSAnymore() { IndexMetadata firstGenMeta = newState.metadata().index(firstGenIndex); RolloverInfo rolloverInfo = firstGenMeta.getRolloverInfos().get(dataStreamName); assertThat(rolloverInfo, is(notNullValue())); - - IndexMetadata downsampleMeta = newState.metadata().index(downsampleIndex); - assertThat(IndexSettings.LIFECYCLE_ORIGINATION_DATE_SETTING.get(downsampleMeta.getSettings()), is(rolloverInfo.getTime())); - // the donwsample index 
contains metadata to remember the index we downsampled from - assertThat(downsampleMeta.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY), is(notNullValue())); - assertThat(downsampleMeta.getCustomData(LIFECYCLE_CUSTOM_INDEX_METADATA_KEY).get(REPLACEMENT_SOURCE_INDEX), is(firstGenIndex)); } public void testListenersIsNonConsideredInEquals() { diff --git a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/20_basic.yml b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/20_basic.yml index 3731260e98f68..296c692fa2d49 100644 --- a/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/20_basic.yml +++ b/modules/data-streams/src/yamlRestTest/resources/rest-api-spec/test/data_stream/lifecycle/20_basic.yml @@ -1,8 +1,8 @@ setup: - skip: features: allowed_warnings - version: " - 8.9.99" - reason: "Data stream lifecycles only supported in 8.10+" + version: " - 8.10.99" + reason: "Data stream lifecycles only supported in 8.11+" - do: allowed_warnings: - "index template [my-lifecycle] has index patterns [data-stream-with-lifecycle] matching patterns from existing older templates [global] with patterns (global => [*]); this template [my-lifecycle] will take precedence during new index creation" @@ -65,9 +65,22 @@ setup: - do: indices.put_data_lifecycle: name: "*" - body: - data_retention: '30d' - enabled: false + body: > + { + "downsampling": [ + { + "after": "10d", + "fixed_interval": "1h" + }, + { + "after": "100d", + "fixed_interval": "10h" + } + ], + "data_retention": "30d", + "enabled": false + } + - is_true: acknowledged - do: @@ -77,9 +90,17 @@ setup: - match: { data_streams.0.name: data-stream-with-lifecycle } - match: { data_streams.0.lifecycle.data_retention: '30d' } - match: { data_streams.0.lifecycle.enabled: false} + - match: { data_streams.0.lifecycle.downsampling.0.after: '10d'} + - match: { data_streams.0.lifecycle.downsampling.0.fixed_interval: '1h'} + - match: { data_streams.0.lifecycle.downsampling.1.after: '100d'} + - match: { data_streams.0.lifecycle.downsampling.1.fixed_interval: '10h'} - match: { data_streams.1.name: simple-data-stream1 } - match: { data_streams.1.lifecycle.data_retention: '30d' } - match: { data_streams.1.lifecycle.enabled: false} + - match: { data_streams.1.lifecycle.downsampling.0.after: '10d'} + - match: { data_streams.1.lifecycle.downsampling.0.fixed_interval: '1h'} + - match: { data_streams.1.lifecycle.downsampling.1.after: '100d'} + - match: { data_streams.1.lifecycle.downsampling.1.fixed_interval: '10h'} --- "Enable lifecycle": diff --git a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java index adaf7e0d6c4d7..a41c42260dece 100644 --- a/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java +++ b/modules/ingest-common/src/main/java/org/elasticsearch/ingest/common/GrokProcessorGetAction.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.ingest.common; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; @@ -61,7 +61,7 @@ public Request(boolean sorted, String ecsCompatibility) { Request(StreamInput in) throws IOException { super(in); this.sorted = in.readBoolean(); - 
this.ecsCompatibility = in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0) + this.ecsCompatibility = in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0) ? in.readString() : GrokProcessor.DEFAULT_ECS_COMPATIBILITY_MODE; } @@ -75,7 +75,7 @@ public ActionRequestValidationException validate() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeBoolean(sorted); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { out.writeString(ecsCompatibility); } } @@ -116,7 +116,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(grokPatterns, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(grokPatterns, StreamOutput::writeString); } } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java index 0732674632b34..d60a740930858 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java @@ -338,7 +338,7 @@ public GeoIpDownloaderStats getStatus() { private void scheduleNextRun(TimeValue time) { if (threadPool.scheduler().isShutdown() == false) { - scheduled = threadPool.schedule(this::runDownloader, time, ThreadPool.Names.GENERIC); + scheduled = threadPool.schedule(this::runDownloader, time, threadPool.generic()); } } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskParams.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskParams.java index a8fe538f954d5..276f67f6ee736 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskParams.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskParams.java @@ -9,6 +9,7 @@ package org.elasticsearch.ingest.geoip; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.persistent.PersistentTaskParams; @@ -42,7 +43,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_13_0; + return TransportVersions.V_7_13_0; } @Override diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java index b5024c7c0f8ec..589950116e0af 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java @@ -9,6 +9,7 @@ package org.elasticsearch.ingest.geoip; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.VersionedNamedWriteable; @@ -122,12 +123,12 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_13_0; + return TransportVersions.V_7_13_0; } @Override public void 
writeTo(StreamOutput out) throws IOException { - out.writeMap(databases, StreamOutput::writeString, (o, v) -> { + out.writeMap(databases, (o, v) -> { o.writeLong(v.lastUpdate); o.writeVInt(v.firstChunk); o.writeVInt(v.lastChunk); diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsAction.java index 4364d164cd287..71995864b0233 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsAction.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/stats/GeoIpDownloaderStatsAction.java @@ -8,7 +8,7 @@ package org.elasticsearch.ingest.geoip.stats; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.nodes.BaseNodeResponse; @@ -98,12 +98,12 @@ public GeoIpDownloaderStats getStats() { @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeResponse::new); + return in.readCollectionAsList(NodeResponse::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } @Override @@ -164,10 +164,10 @@ public static class NodeResponse extends BaseNodeResponse { protected NodeResponse(StreamInput in) throws IOException { super(in); stats = in.readBoolean() ? new GeoIpDownloaderStats(in) : null; - databases = in.readImmutableSet(StreamInput::readString); - filesInTemp = in.readImmutableSet(StreamInput::readString); - configDatabases = in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0) - ? in.readImmutableSet(StreamInput::readString) + databases = in.readCollectionAsImmutableSet(StreamInput::readString); + filesInTemp = in.readCollectionAsImmutableSet(StreamInput::readString); + configDatabases = in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0) + ? 
in.readCollectionAsImmutableSet(StreamInput::readString) : null; } @@ -208,10 +208,10 @@ public void writeTo(StreamOutput out) throws IOException { if (stats != null) { stats.writeTo(out); } - out.writeCollection(databases, StreamOutput::writeString); - out.writeCollection(filesInTemp, StreamOutput::writeString); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { - out.writeCollection(configDatabases, StreamOutput::writeString); + out.writeStringCollection(databases); + out.writeStringCollection(filesInTemp); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { + out.writeStringCollection(configDatabases); } } diff --git a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java index d32848b529fdb..45a0f60f4e1b8 100644 --- a/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java +++ b/modules/lang-mustache/src/internalClusterTest/java/org/elasticsearch/script/mustache/MultiSearchTemplateIT.java @@ -8,7 +8,7 @@ package org.elasticsearch.script.mustache; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.common.Strings; @@ -206,7 +206,7 @@ public void testCCSCheckCompatibility() throws Exception { "[fail_before_current_version] was released first in version %s, failed compatibility " + "check trying to send it to node with version %s", FailBeforeCurrentVersionQueryBuilder.FUTURE_VERSION, - TransportVersion.MINIMUM_CCS_VERSION + TransportVersions.MINIMUM_CCS_VERSION ); String actualCause = ex.getCause().getMessage(); assertEquals(expectedCause, actualCause); diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java index b24198f10f1f3..b97d1b00573f4 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateRequest.java @@ -41,7 +41,7 @@ public MultiSearchTemplateRequest() {} MultiSearchTemplateRequest(StreamInput in) throws IOException { super(in); maxConcurrentSearchRequests = in.readVInt(); - requests = in.readList(SearchTemplateRequest::new); + requests = in.readCollectionAsList(SearchTemplateRequest::new); } /** @@ -116,7 +116,7 @@ public MultiSearchTemplateRequest indicesOptions(IndicesOptions indicesOptions) public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeVInt(maxConcurrentSearchRequests); - out.writeList(requests); + out.writeCollection(requests); } @Override diff --git a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java index f426480155356..ae4d3469f96c4 100644 --- a/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java +++ b/modules/lang-mustache/src/main/java/org/elasticsearch/script/mustache/MultiSearchTemplateResponse.java @@ -10,7 +10,7 @@ import org.elasticsearch.ElasticsearchException; import 
org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.common.Strings; @@ -102,7 +102,7 @@ public String toString() { MultiSearchTemplateResponse(StreamInput in) throws IOException { super(in); items = in.readArray(Item::new, Item[]::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_0_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_0_0)) { tookInMillis = in.readVLong(); } else { tookInMillis = -1L; @@ -136,7 +136,7 @@ public TimeValue getTook() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeArray(items); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_0_0)) { out.writeVLong(tookInMillis); } } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java index 5ebeefdaf2d16..23f8479a817d3 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextAction.java @@ -112,7 +112,7 @@ public Response(List scriptContextNames, PainlessContextInfo painlessCon public Response(StreamInput in) throws IOException { super(in); - scriptContextNames = in.readStringList(); + scriptContextNames = in.readStringCollectionAsList(); painlessContextInfo = in.readOptionalWriteable(PainlessContextInfo::new); } diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextClassBindingInfo.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextClassBindingInfo.java index 7fa88e8f0e595..6dfa9e3b80fcb 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextClassBindingInfo.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextClassBindingInfo.java @@ -77,7 +77,7 @@ public PainlessContextClassBindingInfo(StreamInput in) throws IOException { name = in.readString(); rtn = in.readString(); readOnly = in.readInt(); - parameters = in.readImmutableList(StreamInput::readString); + parameters = in.readCollectionAsImmutableList(StreamInput::readString); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextClassInfo.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextClassInfo.java index 3e1158faebd4b..1abb887418ddc 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextClassInfo.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextClassInfo.java @@ -124,22 +124,22 @@ public PainlessContextClassInfo( public PainlessContextClassInfo(StreamInput in) throws IOException { name = in.readString(); imported = in.readBoolean(); - constructors = in.readImmutableList(PainlessContextConstructorInfo::new); - staticMethods = in.readImmutableList(PainlessContextMethodInfo::new); - methods = in.readImmutableList(PainlessContextMethodInfo::new); - staticFields = in.readImmutableList(PainlessContextFieldInfo::new); - fields = 
in.readImmutableList(PainlessContextFieldInfo::new); + constructors = in.readCollectionAsImmutableList(PainlessContextConstructorInfo::new); + staticMethods = in.readCollectionAsImmutableList(PainlessContextMethodInfo::new); + methods = in.readCollectionAsImmutableList(PainlessContextMethodInfo::new); + staticFields = in.readCollectionAsImmutableList(PainlessContextFieldInfo::new); + fields = in.readCollectionAsImmutableList(PainlessContextFieldInfo::new); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeBoolean(imported); - out.writeList(constructors); - out.writeList(staticMethods); - out.writeList(methods); - out.writeList(staticFields); - out.writeList(fields); + out.writeCollection(constructors); + out.writeCollection(staticMethods); + out.writeCollection(methods); + out.writeCollection(staticFields); + out.writeCollection(fields); } public static PainlessContextClassInfo fromXContent(XContentParser parser) { diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextConstructorInfo.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextConstructorInfo.java index cb80283685bfd..e787731fe4915 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextConstructorInfo.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextConstructorInfo.java @@ -62,7 +62,7 @@ public PainlessContextConstructorInfo(String declaring, List parameters) public PainlessContextConstructorInfo(StreamInput in) throws IOException { declaring = in.readString(); - parameters = in.readImmutableList(StreamInput::readString); + parameters = in.readCollectionAsImmutableList(StreamInput::readString); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextInfo.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextInfo.java index 798b73a96ce14..2fb8afdc2d874 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextInfo.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextInfo.java @@ -148,19 +148,19 @@ public PainlessContextInfo( public PainlessContextInfo(StreamInput in) throws IOException { name = in.readString(); - classes = in.readImmutableList(PainlessContextClassInfo::new); - importedMethods = in.readImmutableList(PainlessContextMethodInfo::new); - classBindings = in.readImmutableList(PainlessContextClassBindingInfo::new); - instanceBindings = in.readImmutableList(PainlessContextInstanceBindingInfo::new); + classes = in.readCollectionAsImmutableList(PainlessContextClassInfo::new); + importedMethods = in.readCollectionAsImmutableList(PainlessContextMethodInfo::new); + classBindings = in.readCollectionAsImmutableList(PainlessContextClassBindingInfo::new); + instanceBindings = in.readCollectionAsImmutableList(PainlessContextInstanceBindingInfo::new); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); - out.writeList(classes); - out.writeList(importedMethods); - out.writeList(classBindings); - out.writeList(instanceBindings); + out.writeCollection(classes); + out.writeCollection(importedMethods); + out.writeCollection(classBindings); + out.writeCollection(instanceBindings); } public static PainlessContextInfo fromXContent(XContentParser parser) { diff --git 
a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextInstanceBindingInfo.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextInstanceBindingInfo.java index 1d6888914c2c9..abc4304c8f370 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextInstanceBindingInfo.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextInstanceBindingInfo.java @@ -72,7 +72,7 @@ public PainlessContextInstanceBindingInfo(StreamInput in) throws IOException { declaring = in.readString(); name = in.readString(); rtn = in.readString(); - parameters = in.readImmutableList(StreamInput::readString); + parameters = in.readCollectionAsImmutableList(StreamInput::readString); } @Override diff --git a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextMethodInfo.java b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextMethodInfo.java index b666f1db89824..a3635d1328bc8 100644 --- a/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextMethodInfo.java +++ b/modules/lang-painless/src/main/java/org/elasticsearch/painless/action/PainlessContextMethodInfo.java @@ -71,7 +71,7 @@ public PainlessContextMethodInfo(StreamInput in) throws IOException { declaring = in.readString(); name = in.readString(); rtn = in.readString(); - parameters = in.readImmutableList(StreamInput::readString); + parameters = in.readCollectionAsImmutableList(StreamInput::readString); } @Override diff --git a/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java b/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java index bbc24b74513f6..8437d78962c0c 100644 --- a/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java +++ b/modules/lang-painless/src/test/java/org/elasticsearch/painless/SimilarityScriptTests.java @@ -55,39 +55,39 @@ public void testBasics() throws IOException { Collections.emptyMap() ); ScriptedSimilarity sim = new ScriptedSimilarity("foobar", null, "foobaz", factory::newInstance, true); - Directory dir = new ByteBuffersDirectory(); - IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setSimilarity(sim)); + try (Directory dir = new ByteBuffersDirectory()) { + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setSimilarity(sim)); - Document doc = new Document(); - doc.add(new TextField("f", "foo bar", Store.NO)); - doc.add(new StringField("match", "no", Store.NO)); - w.addDocument(doc); + Document doc = new Document(); + doc.add(new TextField("f", "foo bar", Store.NO)); + doc.add(new StringField("match", "no", Store.NO)); + w.addDocument(doc); - doc = new Document(); - doc.add(new TextField("f", "foo foo bar", Store.NO)); - doc.add(new StringField("match", "yes", Store.NO)); - w.addDocument(doc); + doc = new Document(); + doc.add(new TextField("f", "foo foo bar", Store.NO)); + doc.add(new StringField("match", "yes", Store.NO)); + w.addDocument(doc); - doc = new Document(); - doc.add(new TextField("f", "bar", Store.NO)); - doc.add(new StringField("match", "no", Store.NO)); - w.addDocument(doc); + doc = new Document(); + doc.add(new TextField("f", "bar", Store.NO)); + doc.add(new StringField("match", "no", Store.NO)); + w.addDocument(doc); - IndexReader r = DirectoryReader.open(w); - w.close(); - IndexSearcher searcher = newSearcher(r); - searcher.setSimilarity(sim); - 
Query query = new BoostQuery( - new BooleanQuery.Builder().add(new TermQuery(new Term("f", "foo")), Occur.SHOULD) - .add(new TermQuery(new Term("match", "yes")), Occur.FILTER) - .build(), - 3.2f - ); - TopDocs topDocs = searcher.search(query, 1); - assertEquals(1, topDocs.totalHits.value); - assertEquals((float) (3.2 * 2 / 3), topDocs.scoreDocs[0].score, 0); - w.close(); - dir.close(); + try (IndexReader r = DirectoryReader.open(w)) { + w.close(); + IndexSearcher searcher = newSearcher(r); + searcher.setSimilarity(sim); + Query query = new BoostQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("f", "foo")), Occur.SHOULD) + .add(new TermQuery(new Term("match", "yes")), Occur.FILTER) + .build(), + 3.2f + ); + TopDocs topDocs = searcher.search(query, 1); + assertEquals(1, topDocs.totalHits.value); + assertEquals((float) (3.2 * 2 / 3), topDocs.scoreDocs[0].score, 0); + } + } } public void testWeightScript() throws IOException { @@ -104,38 +104,38 @@ public void testWeightScript() throws IOException { Collections.emptyMap() ); ScriptedSimilarity sim = new ScriptedSimilarity("foobar", weightFactory::newInstance, "foobaz", factory::newInstance, true); - Directory dir = new ByteBuffersDirectory(); - IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setSimilarity(sim)); + try (Directory dir = new ByteBuffersDirectory()) { + IndexWriter w = new IndexWriter(dir, newIndexWriterConfig().setSimilarity(sim)); - Document doc = new Document(); - doc.add(new TextField("f", "foo bar", Store.NO)); - doc.add(new StringField("match", "no", Store.NO)); - w.addDocument(doc); + Document doc = new Document(); + doc.add(new TextField("f", "foo bar", Store.NO)); + doc.add(new StringField("match", "no", Store.NO)); + w.addDocument(doc); - doc = new Document(); - doc.add(new TextField("f", "foo foo bar", Store.NO)); - doc.add(new StringField("match", "yes", Store.NO)); - w.addDocument(doc); + doc = new Document(); + doc.add(new TextField("f", "foo foo bar", Store.NO)); + doc.add(new StringField("match", "yes", Store.NO)); + w.addDocument(doc); - doc = new Document(); - doc.add(new TextField("f", "bar", Store.NO)); - doc.add(new StringField("match", "no", Store.NO)); - w.addDocument(doc); + doc = new Document(); + doc.add(new TextField("f", "bar", Store.NO)); + doc.add(new StringField("match", "no", Store.NO)); + w.addDocument(doc); - IndexReader r = DirectoryReader.open(w); - w.close(); - IndexSearcher searcher = newSearcher(r); - searcher.setSimilarity(sim); - Query query = new BoostQuery( - new BooleanQuery.Builder().add(new TermQuery(new Term("f", "foo")), Occur.SHOULD) - .add(new TermQuery(new Term("match", "yes")), Occur.FILTER) - .build(), - 3.2f - ); - TopDocs topDocs = searcher.search(query, 1); - assertEquals(1, topDocs.totalHits.value); - assertEquals((float) (3.2 * 2 / 3), topDocs.scoreDocs[0].score, 0); - w.close(); - dir.close(); + try (IndexReader r = DirectoryReader.open(w)) { + w.close(); + IndexSearcher searcher = newSearcher(r); + searcher.setSimilarity(sim); + Query query = new BoostQuery( + new BooleanQuery.Builder().add(new TermQuery(new Term("f", "foo")), Occur.SHOULD) + .add(new TermQuery(new Term("match", "yes")), Occur.FILTER) + .build(), + 3.2f + ); + TopDocs topDocs = searcher.search(query, 1); + assertEquals(1, topDocs.totalHits.value); + assertEquals((float) (3.2 * 2 / 3), topDocs.scoreDocs[0].score, 0); + } + } } } diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml 
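The reworked similarity tests put the Directory and IndexReader in try-with-resources so both are released even when an assertion throws mid-test; the writer can be closed as soon as the reader is open, because the reader holds its own reference. A compact sketch of the same shape against plain Lucene (similarity wiring omitted, names illustrative):

    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.TextField;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.store.ByteBuffersDirectory;
    import org.apache.lucene.store.Directory;

    class TryWithResourcesSketch {
        static int indexAndCount() throws Exception {
            try (Directory dir = new ByteBuffersDirectory()) {
                IndexWriter w = new IndexWriter(dir, new IndexWriterConfig());
                Document doc = new Document();
                doc.add(new TextField("f", "foo bar", Field.Store.NO));
                w.addDocument(doc);
                try (IndexReader r = DirectoryReader.open(w)) {
                    w.close(); // the reader keeps its own reference, so the writer can go early
                    return r.numDocs();
                }
            } // dir is closed here even if anything above threw
        }
    }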
b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml index 4418a7a602eae..291f014662995 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/140_dense_vector_basic.yml @@ -12,6 +12,7 @@ setup: properties: vector: type: dense_vector + index: false dims: 5 indexed_vector: type: dense_vector diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/145_dense_vector_byte_basic.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/145_dense_vector_byte_basic.yml index 71a43a44305d2..ed1f93ccc33db 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/145_dense_vector_byte_basic.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/145_dense_vector_byte_basic.yml @@ -14,6 +14,7 @@ setup: properties: vector: type: dense_vector + index: false element_type: byte dims: 5 indexed_vector: diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/150_dense_vector_l1l2.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/150_dense_vector_l1l2.yml index d0e3d4dcdf0c5..bd96bed42b6e0 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/150_dense_vector_l1l2.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/150_dense_vector_l1l2.yml @@ -11,6 +11,7 @@ setup: mappings: properties: my_dense_vector: + index: false type: dense_vector dims: 5 - do: diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/155_dense_vector_byte_l1l2.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/155_dense_vector_byte_l1l2.yml index cbf5dcae8b783..8b2a4f9887f95 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/155_dense_vector_byte_l1l2.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/155_dense_vector_byte_l1l2.yml @@ -13,6 +13,7 @@ setup: mappings: properties: my_dense_vector: + index: false type: dense_vector element_type: byte dims: 5 diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/160_dense_vector_special_cases.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/160_dense_vector_special_cases.yml index 7289628872c97..fc24964feafe2 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/160_dense_vector_special_cases.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/160_dense_vector_special_cases.yml @@ -13,6 +13,7 @@ setup: properties: vector: type: dense_vector + index: false dims: 3 indexed_vector: type: dense_vector diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/165_dense_vector_byte_special_cases.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/165_dense_vector_byte_special_cases.yml index 11531bd0c1461..4848b1d576458 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/165_dense_vector_byte_special_cases.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/165_dense_vector_byte_special_cases.yml @@ -15,6 +15,7 @@ setup: properties: 
vector: type: dense_vector + index: false element_type: byte dims: 3 indexed_vector: diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/170_dense_vector_script_access.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/170_dense_vector_script_access.yml index 38cb6fa856429..dd0cc8cde92b6 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/170_dense_vector_script_access.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/170_dense_vector_script_access.yml @@ -7,6 +7,7 @@ mappings: properties: v: + index: false type: dense_vector dims: 3 diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/175_dense_vector_byte_script_access.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/175_dense_vector_byte_script_access.yml index daa8344a1ee23..c361c0a082bcf 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/175_dense_vector_byte_script_access.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/175_dense_vector_byte_script_access.yml @@ -13,6 +13,7 @@ setup: properties: v: type: dense_vector + index: false element_type: byte dims: 3 diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/180_knn_and_binary_dv_fields_api.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/180_knn_and_binary_dv_fields_api.yml index b141eca102e08..75d2ca8d3a7a9 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/180_knn_and_binary_dv_fields_api.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/180_knn_and_binary_dv_fields_api.yml @@ -14,6 +14,7 @@ properties: bdv: type: dense_vector + index: false dims: 3 knn: type: dense_vector @@ -100,6 +101,7 @@ properties: bdv: type: dense_vector + index: false dims: 3 knn: type: dense_vector @@ -186,6 +188,7 @@ properties: bdv: type: dense_vector + index: false dims: 3 knn: type: dense_vector @@ -306,6 +309,7 @@ bdv: type: dense_vector dims: 3 + index: false knn: type: dense_vector dims: 3 @@ -423,6 +427,7 @@ properties: bdv: type: dense_vector + index: false dims: 3 knn: type: dense_vector @@ -499,6 +504,7 @@ properties: bdv: type: dense_vector + index: false dims: 3 knn: type: dense_vector @@ -601,6 +607,7 @@ properties: bdv: type: dense_vector + index: false dims: 3 knn: type: dense_vector @@ -719,6 +726,7 @@ properties: bdv: type: dense_vector + index: false dims: 3 knn: type: dense_vector @@ -817,6 +825,7 @@ properties: bdv: type: dense_vector + index: false dims: 3 knn: type: dense_vector diff --git a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/185_knn_and_binary_byte_dv_fields_api.yml b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/185_knn_and_binary_byte_dv_fields_api.yml index 6867230ea9978..643f694e12c3b 100644 --- a/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/185_knn_and_binary_byte_dv_fields_api.yml +++ b/modules/lang-painless/src/yamlRestTest/resources/rest-api-spec/test/painless/185_knn_and_binary_byte_dv_fields_api.yml @@ -15,6 +15,7 @@ setup: properties: bdv: type: dense_vector + index: false element_type: byte dims: 3 knn: @@ -99,6 +100,7 @@ setup: properties: bdv: type: dense_vector + index: false dims: 3 knn: type: dense_vector @@ -181,6 +183,7 @@ 
setup: properties: bdv: type: dense_vector + index: false element_type: byte dims: 3 knn: @@ -298,6 +301,7 @@ setup: bdv: type: dense_vector element_type: byte + index: false dims: 3 knn: type: dense_vector @@ -411,6 +415,7 @@ setup: properties: bdv: type: dense_vector + index: false element_type: byte dims: 3 knn: @@ -485,6 +490,7 @@ setup: properties: bdv: type: dense_vector + index: false element_type: byte dims: 3 knn: @@ -583,6 +589,7 @@ setup: properties: bdv: type: dense_vector + index: false element_type: byte dims: 3 knn: @@ -697,6 +704,7 @@ setup: properties: bdv: type: dense_vector + index: false element_type: byte dims: 3 knn: @@ -792,6 +800,7 @@ setup: properties: bdv: type: dense_vector + index: false element_type: byte dims: 3 knn: diff --git a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/GeometryCollectionBuilder.java b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/GeometryCollectionBuilder.java index 0ebbbb3d6bf1d..44879fc01eff3 100644 --- a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/GeometryCollectionBuilder.java +++ b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/GeometryCollectionBuilder.java @@ -51,7 +51,7 @@ public GeometryCollectionBuilder(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeNamedWriteableList(shapes); + out.writeNamedWriteableCollection(shapes); } public GeometryCollectionBuilder shape(ShapeBuilder shape) { diff --git a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/MultiLineStringBuilder.java b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/MultiLineStringBuilder.java index da92929699737..9756856b072d7 100644 --- a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/MultiLineStringBuilder.java +++ b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/MultiLineStringBuilder.java @@ -49,7 +49,7 @@ public MultiLineStringBuilder() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(lines); + out.writeCollection(lines); } public MultiLineStringBuilder linestring(LineStringBuilder line) { diff --git a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/MultiPolygonBuilder.java b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/MultiPolygonBuilder.java index 59ed15bbdca4e..be943c3f5066d 100644 --- a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/MultiPolygonBuilder.java +++ b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/MultiPolygonBuilder.java @@ -63,7 +63,7 @@ public MultiPolygonBuilder(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { orientation.writeTo(out); - out.writeList(polygons); + out.writeCollection(polygons); } public Orientation orientation() { diff --git a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/PolygonBuilder.java b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/PolygonBuilder.java index f9a01028f8650..46cc6a131415f 100644 --- a/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/PolygonBuilder.java +++ b/modules/legacy-geo/src/main/java/org/elasticsearch/legacygeo/builders/PolygonBuilder.java @@ -93,7 +93,7 @@ public PolygonBuilder(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { shell.writeTo(out); 
orientation.writeTo(out); - out.writeList(holes); + out.writeCollection(holes); } public Orientation orientation() { diff --git a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureQueryBuilder.java b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureQueryBuilder.java index 28424a9e52891..0e9b293fe9fd0 100644 --- a/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureQueryBuilder.java +++ b/modules/mapper-extras/src/main/java/org/elasticsearch/index/mapper/extras/RankFeatureQueryBuilder.java @@ -12,6 +12,7 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.mapper.MappedFieldType; @@ -414,6 +415,6 @@ protected int doHashCode() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java index f191dfcda3bd5..57649129a638f 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ChildrenAggregationBuilder.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -181,6 +182,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java index c131be17c5e19..c5a285de70577 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/aggregations/ParentAggregationBuilder.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -179,6 +180,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java index 9738bbd43c31a..0892690b79c0a 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java +++ 
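Two mechanical migrations run through the geo builders above and the parent-join and rank-eval classes nearby: StreamOutput.writeList/writeNamedWriteableList become writeCollection/writeNamedWriteableCollection, and wire-version constants move out of TransportVersion into the new TransportVersions holder, leaving TransportVersion as the type only. A sketch of the post-rename serialization idiom, using a hypothetical PointList writeable:

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;
import java.util.List;

public class PointList implements Writeable {

    public record Point(double x, double y) implements Writeable {
        Point(StreamInput in) throws IOException {
            this(in.readDouble(), in.readDouble());
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeDouble(x);
            out.writeDouble(y);
        }
    }

    private final List<Point> points;

    public PointList(List<Point> points) {
        this.points = points;
    }

    public PointList(StreamInput in) throws IOException {
        this.points = in.readCollectionAsList(Point::new); // was: in.readList(Point::new)
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeCollection(points); // was: out.writeList(points)
    }

    // Version constants move the same way: TransportVersion.ZERO -> TransportVersions.ZERO,
    // TransportVersion.V_8_0_0 -> TransportVersions.V_8_0_0, with TransportVersion kept as the type.
}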
b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasChildQueryBuilder.java @@ -19,6 +19,7 @@ import org.apache.lucene.search.similarities.Similarity; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -541,6 +542,6 @@ protected void extractInnerHitBuilders(Map inner @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java index f3777c526994a..962916cc04645 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/HasParentQueryBuilder.java @@ -12,6 +12,7 @@ import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -313,6 +314,6 @@ protected void extractInnerHitBuilders(Map inner @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentIdQueryBuilder.java b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentIdQueryBuilder.java index 7d15bbcedf8fd..8fb72ddce1935 100644 --- a/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentIdQueryBuilder.java +++ b/modules/parent-join/src/main/java/org/elasticsearch/join/query/ParentIdQueryBuilder.java @@ -16,6 +16,7 @@ import org.apache.lucene.search.TermQuery; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -193,6 +194,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java index 1f9ad0136790c..eac647cfff634 100644 --- a/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java +++ b/modules/percolator/src/main/java/org/elasticsearch/percolator/PercolateQueryBuilder.java @@ -34,6 +34,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.InputStreamStreamInput; @@ -216,12 +217,12 @@ protected PercolateQueryBuilder(String field, Supplier documentS super(in); field = 
in.readString(); name = in.readOptionalString(); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { String documentType = in.readOptionalString(); assert documentType == null; } indexedDocumentIndex = in.readOptionalString(); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { String indexedDocumentType = in.readOptionalString(); assert indexedDocumentType == null; } @@ -233,7 +234,7 @@ protected PercolateQueryBuilder(String field, Supplier documentS } else { indexedDocumentVersion = null; } - documents = in.readImmutableList(StreamInput::readBytesReference); + documents = in.readCollectionAsImmutableList(StreamInput::readBytesReference); if (documents.isEmpty() == false) { documentXContentType = in.readEnum(XContentType.class); } else { @@ -258,12 +259,12 @@ protected void doWriteTo(StreamOutput out) throws IOException { } out.writeString(field); out.writeOptionalString(name); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { // In 7x, typeless percolate queries are represented by null documentType values out.writeOptionalString(null); } out.writeOptionalString(indexedDocumentIndex); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { // In 7x, typeless percolate queries are represented by null indexedDocumentType values out.writeOptionalString(null); } @@ -667,6 +668,6 @@ public > IFD getForField( @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/EvalQueryQuality.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/EvalQueryQuality.java index 5436aa41c285b..6ecb76ab63486 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/EvalQueryQuality.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/EvalQueryQuality.java @@ -43,7 +43,7 @@ public EvalQueryQuality(String id, double metricScore) { public EvalQueryQuality(StreamInput in) throws IOException { this.queryId = in.readString(); this.metricScore = in.readDouble(); - this.ratedHits = in.readList(RatedSearchHit::new); + this.ratedHits = in.readCollectionAsList(RatedSearchHit::new); this.optionalMetricDetails = in.readOptionalNamedWriteable(MetricDetail.class); } @@ -59,7 +59,7 @@ private EvalQueryQuality(String queryId, ParsedEvalQueryQuality builder) { public void writeTo(StreamOutput out) throws IOException { out.writeString(queryId); out.writeDouble(metricScore); - out.writeList(ratedHits); + out.writeCollection(ratedHits); out.writeOptionalNamedWriteable(this.optionalMetricDetails); } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java index d6a9a25be51f2..d62e6f7426cab 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.index.rankeval; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; 
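The percolator change keeps the 7.x wire shape alive behind version gates: any slot that exists only on old streams is consumed and emitted under the same before(V_8_0_0) check, so both ends of a mixed-version connection agree on the byte layout. A sketch of the idiom with a hypothetical legacyType slot (the field name is invented for illustration):

import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

public class GatedExample implements Writeable {
    private final String field;

    public GatedExample(StreamInput in) throws IOException {
        field = in.readString();
        if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) {
            String legacyType = in.readOptionalString(); // only 7.x peers send this
            assert legacyType == null;
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(field);
        if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) {
            out.writeOptionalString(null); // keep the slot a 7.x peer expects
        }
    }
}

The read side and the write side must make exactly the same decision for a given stream version; an asymmetric gate corrupts the stream for every later field.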
import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -45,7 +45,7 @@ public RankEvalRequest(RankEvalSpec rankingEvaluationSpec, String[] indices) { rankingEvaluationSpec = new RankEvalSpec(in); indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) { searchType = SearchType.fromId(in.readByte()); } } @@ -126,7 +126,7 @@ public void writeTo(StreamOutput out) throws IOException { rankingEvaluationSpec.writeTo(out); out.writeStringArray(indices); indicesOptions.writeIndicesOptions(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) { out.writeByte(searchType.id()); } } diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java index 193735e2ad546..cc7397637e04a 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalResponse.java @@ -88,8 +88,8 @@ public String toString() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeDouble(metricScore); - out.writeMap(details, StreamOutput::writeString, (o, v) -> v.writeTo(o)); - out.writeMap(failures, StreamOutput::writeString, StreamOutput::writeException); + out.writeMap(details, StreamOutput::writeWriteable); + out.writeMap(failures, StreamOutput::writeException); } @Override diff --git a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalSpec.java b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalSpec.java index 89d5561f72ed0..590b6d38af79f 100644 --- a/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalSpec.java +++ b/modules/rank-eval/src/main/java/org/elasticsearch/index/rankeval/RankEvalSpec.java @@ -96,9 +96,9 @@ public RankEvalSpec(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(ratedRequests); + out.writeCollection(ratedRequests); out.writeNamedWriteable(metric); - out.writeMap(templates, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + out.writeMap(templates, StreamOutput::writeWriteable); out.writeVInt(maxConcurrentSearches); } diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java index caf09545bde1e..8878e988eb4fb 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/AsyncBulkByScrollActionTests.java @@ -89,6 +89,7 @@ import java.util.Set; import java.util.concurrent.Delayed; import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -392,7 +393,7 @@ public void testThreadPoolRejectionsAbortRequest() throws Exception { worker.rethrottle(1); setupClient(new TestThreadPool(getTestName()) { @Override - public ScheduledCancellable schedule(Runnable command, TimeValue delay, String 
name) { + public ScheduledCancellable schedule(Runnable command, TimeValue delay, Executor executor) { // While we're here we can check that the sleep made it through assertThat(delay.nanos(), greaterThan(0L)); assertThat(delay.seconds(), lessThanOrEqualTo(10L)); @@ -518,7 +519,7 @@ public void testScrollDelay() throws Exception { AtomicReference capturedCommand = new AtomicReference<>(); setupClient(new TestThreadPool(getTestName()) { @Override - public ScheduledCancellable schedule(Runnable command, TimeValue delay, String name) { + public ScheduledCancellable schedule(Runnable command, TimeValue delay, Executor executor) { capturedDelay.set(delay); capturedCommand.set(command); return new ScheduledCancellable() { @@ -734,7 +735,7 @@ public void testCancelWhileDelayedAfterScrollResponse() throws Exception { */ setupClient(new TestThreadPool(getTestName()) { @Override - public ScheduledCancellable schedule(Runnable command, TimeValue delay, String name) { + public ScheduledCancellable schedule(Runnable command, TimeValue delay, Executor executor) { /* * This is called twice: * 1. To schedule the throttling. When that happens we immediately cancel the task. @@ -745,7 +746,7 @@ public ScheduledCancellable schedule(Runnable command, TimeValue delay, String n if (delay.nanos() > 0) { generic().execute(() -> taskManager.cancel(testTask, reason, () -> {})); } - return super.schedule(command, delay, name); + return super.schedule(command, delay, executor); } }); diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java index e77fd22588ea5..e159d214c0dab 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/DeleteByQueryBasicTests.java @@ -244,6 +244,7 @@ public void testDeleteByQueryOnReadOnlyAllowDeleteIndex() throws Exception { if (diskAllocationDeciderEnabled == false) { // Disable the disk allocation decider to ensure the read_only_allow_delete block cannot be released setDiskAllocationDeciderEnabled(false); + refreshClusterInfo(); // ensures the logic for removing blocks upon disabling the decider is executed once } // When a read_only_allow_delete block is set on the index, // it will trigger a retry policy in the delete by query request because the rest status of the block is 429 diff --git a/modules/reindex/src/test/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSourceTests.java b/modules/reindex/src/test/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSourceTests.java index 6f0f04b2c864e..3d656bfa175d5 100644 --- a/modules/reindex/src/test/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSourceTests.java +++ b/modules/reindex/src/test/java/org/elasticsearch/reindex/remote/RemoteScrollableHitSourceTests.java @@ -61,6 +61,7 @@ import java.nio.charset.StandardCharsets; import java.util.Map; import java.util.Queue; +import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.LinkedBlockingQueue; @@ -103,7 +104,7 @@ public ExecutorService executor(String name) { } @Override - public ScheduledCancellable schedule(Runnable command, TimeValue delay, String name) { + public ScheduledCancellable schedule(Runnable command, TimeValue delay, Executor name) { command.run(); return null; } diff --git 
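The test overrides above track the new ThreadPool#schedule signature, which takes the Executor itself rather than a thread-pool name String, so the lookup happens once at the call site and the compiler can check it. A sketch assuming the post-change signature:

import org.elasticsearch.core.TimeValue;
import org.elasticsearch.threadpool.ThreadPool;

public class ScheduleSketch {
    void scheduleRefresh(ThreadPool threadPool, Runnable task) {
        // Before: threadPool.schedule(task, delay, ThreadPool.Names.GENERIC) resolved
        // the executor from a String name on every call. Now the Executor is passed directly:
        threadPool.schedule(task, TimeValue.timeValueSeconds(30), threadPool.generic());
        // Any named pool works the same way:
        threadPool.schedule(task, TimeValue.timeValueSeconds(30), threadPool.executor(ThreadPool.Names.SNAPSHOT));
    }
}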
a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java index 828ff053dcbea..b4e6039aae1e4 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java @@ -123,7 +123,7 @@ public DeleteResult delete() throws IOException { @Override public void deleteBlobsIgnoringIfNotExists(Iterator blobNames) throws IOException { - blobStore.deleteBlobs(new Iterator<>() { + blobStore.deleteBlobsIgnoringIfNotExists(new Iterator<>() { @Override public boolean hasNext() { return blobNames.hasNext(); diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java index 5f1d22fa9ecc7..70789c5568fbb 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java @@ -263,7 +263,8 @@ private static void filterDeleteExceptionsAndRethrow(Exception e, IOException ex throw exception; } - void deleteBlobs(Iterator blobs) throws IOException { + @Override + public void deleteBlobsIgnoringIfNotExists(Iterator blobs) throws IOException { if (blobs.hasNext() == false) { return; } diff --git a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/executors/ReactorScheduledExecutorService.java b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/executors/ReactorScheduledExecutorService.java index 6fd699bf5ff25..a3359e07119b5 100644 --- a/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/executors/ReactorScheduledExecutorService.java +++ b/modules/repository-azure/src/main/java/org/elasticsearch/repositories/azure/executors/ReactorScheduledExecutorService.java @@ -36,13 +36,11 @@ @SuppressForbidden(reason = "It wraps a ThreadPool and delegates all the work") public class ReactorScheduledExecutorService extends AbstractExecutorService implements ScheduledExecutorService { private final ThreadPool threadPool; - private final String executorName; private final ExecutorService delegate; private final Logger logger = LogManager.getLogger(ReactorScheduledExecutorService.class); public ReactorScheduledExecutorService(ThreadPool threadPool, String executorName) { this.threadPool = threadPool; - this.executorName = executorName; this.delegate = threadPool.executor(executorName); } @@ -54,14 +52,14 @@ public ScheduledFuture schedule(Callable callable, long delay, TimeUni } catch (Exception e) { throw new RuntimeException(e); } - }, new TimeValue(delay, unit), executorName); + }, new TimeValue(delay, unit), delegate); return new ReactorFuture<>(schedule); } public ScheduledFuture schedule(Runnable command, long delay, TimeUnit unit) { Runnable decoratedCommand = decorateRunnable(command); - Scheduler.ScheduledCancellable schedule = threadPool.schedule(decoratedCommand, new TimeValue(delay, unit), executorName); + Scheduler.ScheduledCancellable schedule = threadPool.schedule(decoratedCommand, new TimeValue(delay, unit), delegate); return new ReactorFuture<>(schedule); } @@ -75,11 +73,7 @@ public ScheduledFuture scheduleAtFixedRate(Runnable command, long initialDela } catch 
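AzureBlobContainer now routes bulk deletes through the BlobStore interface method, adapting the incoming names with a lazy wrapping Iterator instead of materialising a list, so arbitrarily large deletions stay streaming. A generic form of that adapter; the method names and path prefix are illustrative:

import java.util.Iterator;
import java.util.List;
import java.util.function.Function;

public final class MappingIterator {
    static <T, R> Iterator<R> map(Iterator<T> source, Function<T, R> fn) {
        return new Iterator<>() {
            @Override
            public boolean hasNext() {
                return source.hasNext();
            }

            @Override
            public R next() {
                return fn.apply(source.next()); // rewrite each element as it is consumed
            }
        };
    }

    public static void main(String[] args) {
        Iterator<String> names = List.of("a", "b").iterator();
        Iterator<String> absolute = map(names, n -> "container/base/path/" + n); // hypothetical prefix
        absolute.forEachRemaining(System.out::println);
    }
}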
(EsRejectedExecutionException e) { if (e.isExecutorShutdown()) { logger.debug( - () -> format( - "could not schedule execution of [%s] on [%s] as executor is shut down", - decoratedCommand, - executorName - ), + () -> format("could not schedule execution of [%s] on [%s] as executor is shut down", decoratedCommand, delegate), e ); } else { @@ -93,7 +87,7 @@ public ScheduledFuture scheduleAtFixedRate(Runnable command, long initialDela public ScheduledFuture scheduleWithFixedDelay(Runnable command, long initialDelay, long delay, TimeUnit unit) { Runnable decorateRunnable = decorateRunnable(command); - Scheduler.Cancellable cancellable = threadPool.scheduleWithFixedDelay(decorateRunnable, new TimeValue(delay, unit), executorName); + Scheduler.Cancellable cancellable = threadPool.scheduleWithFixedDelay(decorateRunnable, new TimeValue(delay, unit), delegate); return new ReactorFuture<>(cancellable); } diff --git a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java index 7d6ac118ceb01..76fade3c5afae 100644 --- a/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java +++ b/modules/repository-gcs/src/main/java/org/elasticsearch/repositories/gcs/GoogleCloudStorageBlobStore.java @@ -524,7 +524,8 @@ public String next() { * * @param blobNames names of the blobs to delete */ - void deleteBlobsIgnoringIfNotExists(Iterator blobNames) throws IOException { + @Override + public void deleteBlobsIgnoringIfNotExists(Iterator blobNames) throws IOException { if (blobNames.hasNext() == false) { return; } diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index 58c6586ccd044..0057f36d94cb8 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -13,13 +13,11 @@ import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; import com.amazonaws.services.s3.model.AmazonS3Exception; import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; -import com.amazonaws.services.s3.model.DeleteObjectsRequest; import com.amazonaws.services.s3.model.GetObjectRequest; import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; import com.amazonaws.services.s3.model.ListMultipartUploadsRequest; import com.amazonaws.services.s3.model.ListNextBatchOfObjectsRequest; import com.amazonaws.services.s3.model.ListObjectsRequest; -import com.amazonaws.services.s3.model.MultiObjectDeleteException; import com.amazonaws.services.s3.model.MultipartUpload; import com.amazonaws.services.s3.model.ObjectListing; import com.amazonaws.services.s3.model.ObjectMetadata; @@ -32,7 +30,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.action.support.RefCountingListener; @@ -70,12 +67,10 @@ import java.util.Map; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; import 
java.util.function.Function; import java.util.stream.Collectors; import static org.elasticsearch.common.blobstore.support.BlobContainerUtils.getRegisterUsingConsistentRead; -import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.repositories.s3.S3Repository.MAX_FILE_SIZE; import static org.elasticsearch.repositories.s3.S3Repository.MAX_FILE_SIZE_USING_MULTIPART; import static org.elasticsearch.repositories.s3.S3Repository.MIN_PART_SIZE_USING_MULTIPART; @@ -84,12 +79,6 @@ class S3BlobContainer extends AbstractBlobContainer { private static final Logger logger = LogManager.getLogger(S3BlobContainer.class); - /** - * Maximum number of deletes in a {@link DeleteObjectsRequest}. - * @see S3 Documentation. - */ - private static final int MAX_BULK_DELETES = 1000; - private final S3BlobStore blobStore; private final String keyPath; @@ -357,55 +346,7 @@ public String next() { outstanding = blobNames; } - final List partition = new ArrayList<>(); - try (AmazonS3Reference clientReference = blobStore.clientReference()) { - // S3 API only allows 1k blobs per delete so we split up the given blobs into requests of max. 1k deletes - final AtomicReference aex = new AtomicReference<>(); - SocketAccess.doPrivilegedVoid(() -> { - outstanding.forEachRemaining(key -> { - partition.add(key); - if (partition.size() == MAX_BULK_DELETES) { - deletePartition(clientReference, partition, aex); - partition.clear(); - } - }); - if (partition.isEmpty() == false) { - deletePartition(clientReference, partition, aex); - } - }); - if (aex.get() != null) { - throw aex.get(); - } - } catch (Exception e) { - throw new IOException("Failed to delete blobs " + partition.stream().limit(10).toList(), e); - } - } - - private void deletePartition(AmazonS3Reference clientReference, List partition, AtomicReference aex) { - try { - clientReference.client().deleteObjects(bulkDelete(blobStore, partition)); - } catch (MultiObjectDeleteException e) { - // We are sending quiet mode requests so we can't use the deleted keys entry on the exception and instead - // first remove all keys that were sent in the request and then add back those that ran into an exception. - logger.warn( - () -> format( - "Failed to delete some blobs %s", - e.getErrors().stream().map(err -> "[" + err.getKey() + "][" + err.getCode() + "][" + err.getMessage() + "]").toList() - ), - e - ); - aex.set(ExceptionsHelper.useOrSuppress(aex.get(), e)); - } catch (AmazonClientException e) { - // The AWS client threw any unexpected exception and did not execute the request at all so we do not - // remove any keys from the outstanding deletes set. 
- aex.set(ExceptionsHelper.useOrSuppress(aex.get(), e)); - } - } - - private static DeleteObjectsRequest bulkDelete(S3BlobStore blobStore, List blobs) { - return new DeleteObjectsRequest(blobStore.bucket()).withKeys(blobs.toArray(Strings.EMPTY_ARRAY)) - .withQuiet(true) - .withRequestMetricCollector(blobStore.deleteMetricCollector); + blobStore.deleteBlobsIgnoringIfNotExists(outstanding); } @Override @@ -808,7 +749,7 @@ void run(BytesReference expected, BytesReference updated, ActionListener safeAbortMultipartUpload(currentUploadId))); } } @@ -820,7 +761,7 @@ void run(BytesReference expected, BytesReference updated, ActionListener 0) { threadPool.scheduleUnlessShuttingDown( TimeValue.timeValueMillis(TimeValue.timeValueSeconds(uploadIndex).millis() + Randomness.get().nextInt(50)), - ThreadPool.Names.SNAPSHOT, + blobStore.getSnapshotExecutor(), cancelConcurrentUpdates ); } else { diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java index b1db2b3e0aaef..f25ee58772859 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobStore.java @@ -8,16 +8,21 @@ package org.elasticsearch.repositories.s3; +import com.amazonaws.AmazonClientException; import com.amazonaws.Request; import com.amazonaws.Response; import com.amazonaws.metrics.RequestMetricCollector; import com.amazonaws.services.s3.model.CannedAccessControlList; +import com.amazonaws.services.s3.model.DeleteObjectsRequest; +import com.amazonaws.services.s3.model.MultiObjectDeleteException; import com.amazonaws.services.s3.model.StorageClass; import com.amazonaws.util.AWSRequestMetrics; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; @@ -28,13 +33,26 @@ import org.elasticsearch.threadpool.ThreadPool; import java.io.IOException; +import java.util.ArrayList; import java.util.HashMap; +import java.util.Iterator; +import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.core.Strings.format; class S3BlobStore implements BlobStore { + /** + * Maximum number of deletes in a {@link DeleteObjectsRequest}. + * @see S3 Documentation. 
+ */ + private static final int MAX_BULK_DELETES = 1000; + private static final Logger logger = LogManager.getLogger(S3BlobStore.class); private final S3Service service; @@ -54,6 +72,7 @@ class S3BlobStore implements BlobStore { private final RepositoryMetadata repositoryMetadata; private final ThreadPool threadPool; + private final Executor snapshotExecutor; private final Stats stats = new Stats(); @@ -84,6 +103,7 @@ class S3BlobStore implements BlobStore { this.storageClass = initStorageClass(storageClass); this.repositoryMetadata = repositoryMetadata; this.threadPool = threadPool; + this.snapshotExecutor = threadPool.executor(ThreadPool.Names.SNAPSHOT); this.getMetricCollector = new IgnoreNoResponseMetricsCollector() { @Override public void collectMetrics(Request request) { @@ -128,6 +148,10 @@ public void collectMetrics(Request request) { }; } + public Executor getSnapshotExecutor() { + return snapshotExecutor; + } + public TimeValue getCompareAndExchangeTimeToLive() { return service.compareAndExchangeTimeToLive; } @@ -189,6 +213,59 @@ public BlobContainer blobContainer(BlobPath path) { return new S3BlobContainer(path, this); } + @Override + public void deleteBlobsIgnoringIfNotExists(Iterator blobNames) throws IOException { + final List partition = new ArrayList<>(); + try (AmazonS3Reference clientReference = clientReference()) { + // S3 API only allows 1k blobs per delete so we split up the given blobs into requests of max. 1k deletes + final AtomicReference aex = new AtomicReference<>(); + SocketAccess.doPrivilegedVoid(() -> { + blobNames.forEachRemaining(key -> { + partition.add(key); + if (partition.size() == MAX_BULK_DELETES) { + deletePartition(clientReference, partition, aex); + partition.clear(); + } + }); + if (partition.isEmpty() == false) { + deletePartition(clientReference, partition, aex); + } + }); + if (aex.get() != null) { + throw aex.get(); + } + } catch (Exception e) { + throw new IOException("Failed to delete blobs " + partition.stream().limit(10).toList(), e); + } + } + + private void deletePartition(AmazonS3Reference clientReference, List partition, AtomicReference aex) { + try { + clientReference.client().deleteObjects(bulkDelete(this, partition)); + } catch (MultiObjectDeleteException e) { + // We are sending quiet mode requests so we can't use the deleted keys entry on the exception and instead + // first remove all keys that were sent in the request and then add back those that ran into an exception. + logger.warn( + () -> format( + "Failed to delete some blobs %s", + e.getErrors().stream().map(err -> "[" + err.getKey() + "][" + err.getCode() + "][" + err.getMessage() + "]").toList() + ), + e + ); + aex.set(ExceptionsHelper.useOrSuppress(aex.get(), e)); + } catch (AmazonClientException e) { + // The AWS client threw any unexpected exception and did not execute the request at all so we do not + // remove any keys from the outstanding deletes set. 
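The partition loop moved into S3BlobStore exists because S3's DeleteObjects API accepts at most 1,000 keys per request (MAX_BULK_DELETES): the iterator is drained into fixed-size batches and one request is issued per batch. A generic form of the batching loop:

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.function.Consumer;
import java.util.stream.IntStream;

public final class Batching {
    static <T> void forEachBatch(Iterator<T> source, int batchSize, Consumer<List<T>> action) {
        List<T> batch = new ArrayList<>(batchSize);
        while (source.hasNext()) {
            batch.add(source.next());
            if (batch.size() == batchSize) {
                action.accept(List.copyOf(batch));
                batch.clear();
            }
        }
        if (batch.isEmpty() == false) { // final, possibly short partition
            action.accept(List.copyOf(batch));
        }
    }

    public static void main(String[] args) {
        Iterator<Integer> keys = IntStream.range(0, 2500).boxed().iterator();
        forEachBatch(keys, 1000, batch -> System.out.println("delete request with " + batch.size() + " keys"));
        // prints: 1000, 1000, 500
    }
}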
+ aex.set(ExceptionsHelper.useOrSuppress(aex.get(), e)); + } + } + + private static DeleteObjectsRequest bulkDelete(S3BlobStore blobStore, List blobs) { + return new DeleteObjectsRequest(blobStore.bucket()).withKeys(blobs.toArray(Strings.EMPTY_ARRAY)) + .withQuiet(true) + .withRequestMetricCollector(blobStore.deleteMetricCollector); + } + @Override public void close() throws IOException { this.service.close(); diff --git a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java index dc6a83ac293f2..7dddf07d0f23e 100644 --- a/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java +++ b/modules/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3Repository.java @@ -43,6 +43,7 @@ import java.util.Collection; import java.util.Map; +import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Function; @@ -153,12 +154,12 @@ class S3Repository extends MeteredBlobStoreRepository { /** * Artificial delay to introduce after a snapshot finalization or delete has finished so long as the repository is still using the * backwards compatible snapshot format from before - * {@link org.elasticsearch.snapshots.SnapshotsService#SHARD_GEN_IN_REPO_DATA_VERSION} ({@link org.elasticsearch.Version#V_7_6_0}). + * {@link org.elasticsearch.snapshots.SnapshotsService#SHARD_GEN_IN_REPO_DATA_VERSION} ({@link IndexVersion#V_7_6_0}). * This delay is necessary so that the eventually consistent nature of AWS S3 does not randomly result in repository corruption when * doing repository operations in rapid succession on a repository in the old metadata format. * This setting should not be adjusted in production when working with an AWS S3 backed repository. Doing so risks the repository * becoming silently corrupted. To get rid of this waiting period, either create a new S3 repository or remove all snapshots older than - * {@link org.elasticsearch.Version#V_7_6_0} from the repository which will trigger an upgrade of the repository metadata to the new + * {@link IndexVersion#V_7_6_0} from the repository which will trigger an upgrade of the repository metadata to the new * format and disable the cooldown period. 
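Because the requests are sent in quiet mode, S3 reports only the keys that failed; the catch block therefore logs MultiObjectDeleteException.getErrors() and folds exceptions together with ExceptionsHelper.useOrSuppress rather than tracking successes. A sketch against the v1 AWS SDK; the bucket name and error handling are illustrative:

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.MultiObjectDeleteException;

import java.util.List;

public class QuietDelete {
    static void deleteQuietly(AmazonS3 client, String bucket, List<String> keys) {
        DeleteObjectsRequest request = new DeleteObjectsRequest(bucket)
            .withKeys(keys.toArray(new String[0]))
            .withQuiet(true); // response lists only the failures
        try {
            client.deleteObjects(request);
        } catch (MultiObjectDeleteException e) {
            // Partial failure: the unlisted keys were deleted, the listed ones were not.
            for (MultiObjectDeleteException.DeleteError err : e.getErrors()) {
                System.err.println(err.getKey() + ": " + err.getCode() + " " + err.getMessage());
            }
        }
    }
}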
*/ static final Setting COOLDOWN_PERIOD = Setting.timeSetting( @@ -193,6 +194,8 @@ class S3Repository extends MeteredBlobStoreRepository { */ private final TimeValue coolDown; + private final Executor snapshotExecutor; + /** * Constructs an s3 backed repository */ @@ -214,6 +217,7 @@ class S3Repository extends MeteredBlobStoreRepository { buildLocation(metadata) ); this.service = service; + this.snapshotExecutor = threadPool().executor(ThreadPool.Names.SNAPSHOT); // Parse and validate the user's S3 Storage Class setting this.bucket = BUCKET_SETTING.get(metadata.settings()); @@ -331,7 +335,7 @@ public void onRepositoryDataWritten(RepositoryData repositoryData) { final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); assert cancellable != null; listener.onRepositoryDataWritten(repositoryData); - }, coolDown, ThreadPool.Names.SNAPSHOT)); + }, coolDown, snapshotExecutor)); assert existing == null : "Already have an ongoing finalization " + finalizationFuture; } @@ -342,7 +346,7 @@ public void onFailure(Exception e) { final Scheduler.Cancellable cancellable = finalizationFuture.getAndSet(null); assert cancellable != null; listener.onFailure(e); - }, coolDown, ThreadPool.Names.SNAPSHOT)); + }, coolDown, snapshotExecutor)); assert existing == null : "Already have an ongoing finalization " + finalizationFuture; } }; @@ -364,11 +368,7 @@ private ActionListener delayedListener(ActionListener listener) { public void onResponse(T response) { logCooldownInfo(); final Scheduler.Cancellable existing = finalizationFuture.getAndSet( - threadPool.schedule( - ActionRunnable.wrap(wrappedListener, l -> l.onResponse(response)), - coolDown, - ThreadPool.Names.SNAPSHOT - ) + threadPool.schedule(ActionRunnable.wrap(wrappedListener, l -> l.onResponse(response)), coolDown, snapshotExecutor) ); assert existing == null : "Already have an ongoing finalization " + finalizationFuture; } @@ -377,7 +377,7 @@ public void onResponse(T response) { public void onFailure(Exception e) { logCooldownInfo(); final Scheduler.Cancellable existing = finalizationFuture.getAndSet( - threadPool.schedule(ActionRunnable.wrap(wrappedListener, l -> l.onFailure(e)), coolDown, ThreadPool.Names.SNAPSHOT) + threadPool.schedule(ActionRunnable.wrap(wrappedListener, l -> l.onFailure(e)), coolDown, snapshotExecutor) ); assert existing == null : "Already have an ongoing finalization " + finalizationFuture; } diff --git a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java index 2fb4a6a16923f..af462f0f4c723 100644 --- a/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java +++ b/modules/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java @@ -28,6 +28,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.concurrent.CountDown; +import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.SuppressForbidden; @@ -154,7 +155,7 @@ protected BlobContainer createBlobContainer( S3Repository.STORAGE_CLASS_SETTING.getDefault(Settings.EMPTY), repositoryMetadata, BigArrays.NON_RECYCLING_INSTANCE, - null + new DeterministicTaskQueue().getThreadPool() ) ) { @Override diff --git 
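The retries test can no longer pass a null ThreadPool because S3BlobStore now resolves its snapshot executor eagerly in its constructor; DeterministicTaskQueue from the test framework supplies a ThreadPool whose tasks run only when the test drains the queue. A sketch assuming that test-framework API:

import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue;
import org.elasticsearch.threadpool.ThreadPool;

public class DeterministicSchedulingSketch {
    public static void main(String[] args) {
        DeterministicTaskQueue taskQueue = new DeterministicTaskQueue();
        ThreadPool threadPool = taskQueue.getThreadPool();
        threadPool.generic().execute(() -> System.out.println("ran deterministically"));
        taskQueue.runAllTasks(); // nothing executes until the test drains the queue
    }
}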
a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java index ce37aca2d7a3d..327a66e94e6dd 100644 --- a/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java +++ b/modules/repository-url/src/main/java/org/elasticsearch/common/blobstore/url/URLBlobStore.java @@ -21,8 +21,10 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.CheckedFunction; +import java.io.IOException; import java.net.MalformedURLException; import java.net.URL; +import java.util.Iterator; import java.util.List; /** @@ -105,6 +107,11 @@ public BlobContainer blobContainer(BlobPath blobPath) { } } + @Override + public void deleteBlobsIgnoringIfNotExists(Iterator blobNames) throws IOException { + throw new UnsupportedOperationException("Bulk deletes are not supported in URL repositories"); + } + @Override public void close() { // nothing to do here... diff --git a/modules/rest-root/src/main/java/org/elasticsearch/rest/root/MainResponse.java b/modules/rest-root/src/main/java/org/elasticsearch/rest/root/MainResponse.java index 4ce93563ceb22..74559606b1de2 100644 --- a/modules/rest-root/src/main/java/org/elasticsearch/rest/root/MainResponse.java +++ b/modules/rest-root/src/main/java/org/elasticsearch/rest/root/MainResponse.java @@ -10,6 +10,7 @@ import org.elasticsearch.Build; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.ClusterName; @@ -38,7 +39,7 @@ public class MainResponse extends ActionResponse implements ToXContentObject { MainResponse(StreamInput in) throws IOException { super(in); nodeName = in.readString(); - if (in.getTransportVersion().before(TransportVersion.V_8_500_041)) { + if (in.getTransportVersion().before(TransportVersions.V_8_500_041)) { Version.readVersion(in); } @@ -48,13 +49,13 @@ public class MainResponse extends ActionResponse implements ToXContentObject { // the lucene version was previously read by inferring from either Version or IndexVersion. // Now the lucene version is read explicitly. String wireLuceneVersion = null; - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_037)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_037)) { wireLuceneVersion = in.readString(); } else { - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_031)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_031)) { wireLuceneVersion = IndexVersion.readVersion(in).luceneVersion().toString(); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_019)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { TransportVersion.readVersion(in); } } @@ -99,7 +100,7 @@ public Build getBuild() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(nodeName); - if (out.getTransportVersion().before(TransportVersion.V_8_500_041)) { + if (out.getTransportVersion().before(TransportVersions.V_8_500_041)) { Version.writeVersion(Version.CURRENT, out); } @@ -108,13 +109,13 @@ public void writeTo(StreamOutput out) throws IOException { // for those versions until the new format has propagated through serverless. Additionally, // the lucene version was previously inferred from either Version or IndexVersion. // Now the lucene version is written explicitly. 
- if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_037)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_037)) { out.writeString(luceneVersion); } else { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_031)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_031)) { IndexVersion.writeVersion(IndexVersion.current(), out); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_019)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { TransportVersion.writeVersion(TransportVersion.current(), out); } } diff --git a/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/NamedGroupExtractor.java b/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/NamedGroupExtractor.java index b65e8d1911418..7f310caf4ea05 100644 --- a/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/NamedGroupExtractor.java +++ b/modules/runtime-fields-common/src/main/java/org/elasticsearch/runtimefields/NamedGroupExtractor.java @@ -74,7 +74,7 @@ public GrokHelper(TimeValue interval, TimeValue maxExecutionTime) { interval.millis(), maxExecutionTime.millis(), threadPool::relativeTimeInMillis, - (delay, command) -> threadPool.schedule(command, TimeValue.timeValueMillis(delay), ThreadPool.Names.GENERIC) + (delay, command) -> threadPool.schedule(command, TimeValue.timeValueMillis(delay), threadPool.generic()) ); })::getOrCompute; } diff --git a/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java b/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java index eee4af9327f0b..37109bcf01f06 100644 --- a/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java +++ b/modules/systemd/src/main/java/org/elasticsearch/systemd/SystemdPlugin.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.env.Environment; import org.elasticsearch.env.NodeEnvironment; @@ -111,7 +112,7 @@ public Collection createComponents( if (rc < 0) { logger.warn("extending startup timeout via sd_notify failed with [{}]", rc); } - }, TimeValue.timeValueSeconds(15), ThreadPool.Names.SAME)); + }, TimeValue.timeValueSeconds(15), EsExecutors.DIRECT_EXECUTOR_SERVICE)); return List.of(); } diff --git a/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java b/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java index d93c6eff3f061..a6e5f806babd2 100644 --- a/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java +++ b/modules/systemd/src/test/java/org/elasticsearch/systemd/SystemdPluginTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.systemd; import org.elasticsearch.Build; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; @@ -30,6 +31,7 @@ import static org.hamcrest.Matchers.instanceOf; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static 
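With Executor-based scheduling, the old ThreadPool.Names.SAME marker (meaning "run on the calling thread") is replaced by EsExecutors.DIRECT_EXECUTOR_SERVICE, as in the systemd plugin change above. In plain Java the same behaviour is a one-liner:

import java.util.concurrent.Executor;

public class DirectExecutorSketch {
    public static void main(String[] args) {
        Executor direct = Runnable::run; // executes inline on the caller's thread
        direct.execute(() -> System.out.println(Thread.currentThread().getName()));
        // prints "main": no handoff to another thread took place
    }
}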
org.mockito.Mockito.when; @@ -47,8 +49,13 @@ public class SystemdPluginTests extends ESTestCase { { when(extender.cancel()).thenReturn(true); - when(threadPool.scheduleWithFixedDelay(any(Runnable.class), eq(TimeValue.timeValueSeconds(15)), eq(ThreadPool.Names.SAME))) - .thenReturn(extender); + when( + threadPool.scheduleWithFixedDelay( + any(Runnable.class), + eq(TimeValue.timeValueSeconds(15)), + same(EsExecutors.DIRECT_EXECUTOR_SERVICE) + ) + ).thenReturn(extender); } public void testIsEnabled() { diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java index 1ce8f721baf42..9a0d6692723e3 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/transport/netty4/Netty4Transport.java @@ -383,10 +383,10 @@ private void setupPipeline(Channel ch, boolean isRemoteClusterServerChannel) { pipeline.addLast("logging", ESLoggingHandler.INSTANCE); } pipeline.addLast("chunked_writer", new Netty4WriteThrottlingHandler(getThreadPool().getThreadContext())); - pipeline.addLast("dispatcher", new Netty4MessageInboundHandler(this, getInboundPipeline(isRemoteClusterServerChannel))); + pipeline.addLast("dispatcher", new Netty4MessageInboundHandler(this, getInboundPipeline(ch, isRemoteClusterServerChannel))); } - protected InboundPipeline getInboundPipeline(boolean isRemoteClusterServerChannel) { + protected InboundPipeline getInboundPipeline(Channel ch, boolean isRemoteClusterServerChannel) { return new InboundPipeline( getStatsTracker(), threadPool::relativeTimeInMillis, diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpHeaderValidatorTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpHeaderValidatorTests.java index 80356e2e37172..a2c034acdcb8d 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpHeaderValidatorTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpHeaderValidatorTests.java @@ -632,32 +632,36 @@ public void testFullRequestValidationSuccess() { assertThat(netty4HttpHeaderValidator.getState(), equalTo(WAITING_TO_START)); ByteBuf buf = channel.alloc().buffer(); - ByteBufUtil.copy(AsciiString.of("test full http request"), buf); - final DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/uri", buf); - channel.writeInbound(request); + try { + ByteBufUtil.copy(AsciiString.of("test full http request"), buf); + final DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/uri", buf); + channel.writeInbound(request); - // request got through to validation - assertThat(header.get(), sameInstance(request)); - // channel is paused - assertThat(channel.readInbound(), nullValue()); - assertFalse(channel.config().isAutoRead()); + // request got through to validation + assertThat(header.get(), sameInstance(request)); + // channel is paused + assertThat(channel.readInbound(), nullValue()); + assertFalse(channel.config().isAutoRead()); - // validation succeeds - listener.get().onResponse(null); - channel.runPendingTasks(); + // validation succeeds + listener.get().onResponse(null); + channel.runPendingTasks(); - // channel is resumed and waiting for next request - 
assertTrue(channel.config().isAutoRead()); - assertThat(netty4HttpHeaderValidator.getState(), equalTo(WAITING_TO_START)); + // channel is resumed and waiting for next request + assertTrue(channel.config().isAutoRead()); + assertThat(netty4HttpHeaderValidator.getState(), equalTo(WAITING_TO_START)); - DefaultFullHttpRequest throughRequest = channel.readInbound(); - // request goes through unaltered - assertThat(throughRequest, sameInstance(request)); - assertFalse(throughRequest.decoderResult().isFailure()); - // the content is unaltered - assertThat(new String(ByteBufUtil.getBytes(throughRequest.content()), StandardCharsets.UTF_8), is("test full http request")); - assertThat(buf.refCnt(), is(1)); - assertThat(throughRequest.decoderResult().cause(), nullValue()); + DefaultFullHttpRequest throughRequest = channel.readInbound(); + // request goes through unaltered + assertThat(throughRequest, sameInstance(request)); + assertFalse(throughRequest.decoderResult().isFailure()); + // the content is unaltered + assertThat(new String(ByteBufUtil.getBytes(throughRequest.content()), StandardCharsets.UTF_8), is("test full http request")); + assertThat(buf.refCnt(), is(1)); + assertThat(throughRequest.decoderResult().cause(), nullValue()); + } finally { + buf.release(); + } } public void testFullRequestWithDecoderException() { @@ -665,27 +669,31 @@ public void testFullRequestWithDecoderException() { assertThat(netty4HttpHeaderValidator.getState(), equalTo(WAITING_TO_START)); ByteBuf buf = channel.alloc().buffer(); - ByteBufUtil.copy(AsciiString.of("test full http request"), buf); - // a request with a decoder error prior to validation - final DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/uri", buf); - Exception cause = new ElasticsearchException("Boom"); - request.setDecoderResult(DecoderResult.failure(cause)); - channel.writeInbound(request); + try { + ByteBufUtil.copy(AsciiString.of("test full http request"), buf); + // a request with a decoder error prior to validation + final DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/uri", buf); + Exception cause = new ElasticsearchException("Boom"); + request.setDecoderResult(DecoderResult.failure(cause)); + channel.writeInbound(request); - // request goes through without invoking the validator - assertThat(header.get(), nullValue()); - assertThat(listener.get(), nullValue()); - // channel is NOT paused - assertTrue(channel.config().isAutoRead()); - assertThat(netty4HttpHeaderValidator.getState(), equalTo(WAITING_TO_START)); + // request goes through without invoking the validator + assertThat(header.get(), nullValue()); + assertThat(listener.get(), nullValue()); + // channel is NOT paused + assertTrue(channel.config().isAutoRead()); + assertThat(netty4HttpHeaderValidator.getState(), equalTo(WAITING_TO_START)); - DefaultFullHttpRequest throughRequest = channel.readInbound(); - // request goes through unaltered - assertThat(throughRequest, sameInstance(request)); - assertTrue(throughRequest.decoderResult().isFailure()); - assertThat(throughRequest.decoderResult().cause(), equalTo(cause)); - // the content is unaltered - assertThat(new String(ByteBufUtil.getBytes(throughRequest.content()), StandardCharsets.UTF_8), is("test full http request")); - assertThat(buf.refCnt(), is(1)); + DefaultFullHttpRequest throughRequest = channel.readInbound(); + // request goes through unaltered + assertThat(throughRequest, sameInstance(request)); + 
assertTrue(throughRequest.decoderResult().isFailure()); + assertThat(throughRequest.decoderResult().cause(), equalTo(cause)); + // the content is unaltered + assertThat(new String(ByteBufUtil.getBytes(throughRequest.content()), StandardCharsets.UTF_8), is("test full http request")); + assertThat(buf.refCnt(), is(1)); + } finally { + buf.release(); + } } } diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java index 028de6c98d432..c2727f206a07c 100644 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/transport/netty4/SimpleNetty4TransportTests.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.Constants; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; @@ -79,7 +80,7 @@ public void executeHandshake( super.executeHandshake(node, channel, profile, listener); } else { assert getVersion().equals(TransportVersion.current()); - listener.onResponse(TransportVersion.MINIMUM_COMPATIBLE); + listener.onResponse(TransportVersions.MINIMUM_COMPATIBLE); } } }; diff --git a/plugins/examples/settings.gradle b/plugins/examples/settings.gradle index ea6ecdb153f5b..3b6280dc3bcbd 100644 --- a/plugins/examples/settings.gradle +++ b/plugins/examples/settings.gradle @@ -7,7 +7,7 @@ */ plugins { - id "com.gradle.enterprise" version "3.13.1" + id "com.gradle.enterprise" version "3.14.1" } // Include all subdirectories as example projects diff --git a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java index b85acf5d328ce..1dc246cdeeb66 100644 --- a/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java +++ b/plugins/repository-hdfs/src/main/java/org/elasticsearch/repositories/hdfs/HdfsBlobStore.java @@ -17,6 +17,7 @@ import org.elasticsearch.common.blobstore.BlobStore; import java.io.IOException; +import java.util.Iterator; final class HdfsBlobStore implements BlobStore { @@ -69,6 +70,11 @@ public BlobContainer blobContainer(BlobPath path) { return new HdfsBlobContainer(path, this, buildHdfsPath(path), bufferSize, securityContext, replicationFactor); } + @Override + public void deleteBlobsIgnoringIfNotExists(Iterator<String> blobNames) throws IOException { + throw new UnsupportedOperationException("Bulk deletes are not supported in Hdfs repositories"); + } + private Path buildHdfsPath(BlobPath blobPath) { final Path path = translateToHdfsPath(blobPath); if (readOnly == false) { diff --git a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java index 7a9260b38bcda..fed4411f68768 100644 --- a/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java +++ b/plugins/repository-hdfs/src/test/java/org/elasticsearch/repositories/hdfs/HdfsBlobStoreRepositoryTests.java @@ -44,6 +44,11 @@ public void testSnapshotAndRestore() throws Exception {
testSnapshotAndRestore(false); } + @Override + public void testBlobStoreBulkDeletion() throws Exception { + // HDFS does not implement bulk deletion from different BlobContainers + } + @Override protected Collection<Class<? extends Plugin>> nodePlugins() { return Collections.singletonList(HdfsPlugin.class); diff --git a/qa/evil-tests/src/test/java/org/elasticsearch/threadpool/EvilThreadPoolTests.java b/qa/evil-tests/src/test/java/org/elasticsearch/threadpool/EvilThreadPoolTests.java index 39ad4a04528ba..8afc423a6bd74 100644 --- a/qa/evil-tests/src/test/java/org/elasticsearch/threadpool/EvilThreadPoolTests.java +++ b/qa/evil-tests/src/test/java/org/elasticsearch/threadpool/EvilThreadPoolTests.java @@ -21,6 +21,7 @@ import java.util.Optional; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.ScheduledThreadPoolExecutor; import java.util.concurrent.TimeUnit; @@ -49,7 +50,7 @@ public void testExecutionErrorOnDefaultThreadPoolTypes() throws InterruptedExcep for (String executor : ThreadPool.THREAD_POOL_TYPES.keySet()) { checkExecutionError(getExecuteRunner(threadPool.executor(executor))); checkExecutionError(getSubmitRunner(threadPool.executor(executor))); - checkExecutionError(getScheduleRunner(executor)); + checkExecutionError(getScheduleRunner(threadPool.executor(executor))); } } @@ -158,7 +159,7 @@ public void testExecutionExceptionOnDefaultThreadPoolTypes() throws InterruptedE // here, it's ok for the exception not to bubble up. Accessing the future will yield the exception checkExecutionException(getSubmitRunner(threadPool.executor(executor)), false); - checkExecutionException(getScheduleRunner(executor), true); + checkExecutionException(getScheduleRunner(threadPool.executor(executor)), true); } } @@ -310,7 +311,7 @@ public String toString() { }; } - Consumer<Runnable> getScheduleRunner(String executor) { + Consumer<Runnable> getScheduleRunner(Executor executor) { return new Consumer<Runnable>() { @Override public void accept(Runnable runnable) { diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java index 1ee07a08d2f06..b3e384906a609 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/FullClusterRestartIT.java @@ -433,12 +433,12 @@ public void testClusterState() throws Exception { clusterState ); assertEquals("0", numberOfReplicas); - Version version = Version.fromId( + IndexVersion version = IndexVersion.fromId( Integer.valueOf( (String) XContentMapValues.extractValue("metadata.indices."
+ index + ".settings.index.version.created", clusterState) ) ); - assertEquals(getOldClusterVersion(), version); + assertEquals(getOldClusterIndexVersion(), version); } diff --git a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java index bb22311337cc0..4609b27db2909 100644 --- a/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java +++ b/qa/full-cluster-restart/src/javaRestTest/java/org/elasticsearch/upgrades/ParameterizedFullClusterRestartTestCase.java @@ -24,7 +24,7 @@ import static org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus.OLD; import static org.elasticsearch.upgrades.FullClusterRestartUpgradeStatus.UPGRADED; -import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.lessThan; @TestCaseOrdering(FullClusterRestartTestOrdering.class) public abstract class ParameterizedFullClusterRestartTestCase extends ESRestTestCase { @@ -85,11 +85,7 @@ public static IndexVersion getOldClusterIndexVersion() { if (version.equals(org.elasticsearch.Version.CURRENT)) { return IndexVersion.current(); } else { - assertThat( - "Index version needs to be added to restart test parameters", - version, - lessThanOrEqualTo(org.elasticsearch.Version.V_8_10_0) - ); + assertThat("Index version needs to be added to restart test parameters", version, lessThan(org.elasticsearch.Version.V_8_11_0)); return IndexVersion.fromId(version.id); } } diff --git a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java index 576168a76f40a..748cb2d95918e 100644 --- a/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java +++ b/qa/multi-cluster-search/src/test/java/org/elasticsearch/search/CCSDuelIT.java @@ -1075,7 +1075,6 @@ private static Map duelSearchSync(SearchRequest searchRequest, C assertEquals(clustersMRT.getSuccessful(), clustersMRTFalse.getSuccessful()); assertEquals(clustersMRT.getSkipped(), clustersMRTFalse.getSkipped()); - boolean removeSkipped = searchRequest.source().collapse() != null; Map minimizeRoundtripsResponseMap = responseToMap(minimizeRoundtripsSearchResponse); if (clustersMRT.hasClusterObjects() && clustersMRTFalse.hasClusterObjects()) { Map fanOutResponseMap = responseToMap(fanOutSearchResponse); @@ -1148,7 +1147,6 @@ private static Map duelSearchAsync(SearchRequest searchRequest, assertEquals(clustersMRT.getSuccessful(), clustersMRTFalse.getSuccessful()); assertEquals(clustersMRT.getSkipped(), clustersMRTFalse.getSkipped()); - boolean removeSkipped = searchRequest.source().collapse() != null; Map minimizeRoundtripsResponseMap = responseToMap(minimizeRoundtripsSearchResponse); if (clustersMRT.hasClusterObjects() && clustersMRTFalse.hasClusterObjects()) { Map fanOutResponseMap = responseToMap(fanOutSearchResponse); diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java index 74a8eb7fd1988..008a718be5873 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/AbstractRollingTestCase.java @@ -9,8 +9,11 @@ import org.elasticsearch.Version; import 
org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.rest.ESRestTestCase; +import static org.hamcrest.Matchers.lessThan; + public abstract class AbstractRollingTestCase extends ESRestTestCase { protected enum ClusterType { OLD, @@ -31,6 +34,16 @@ public static ClusterType parse(String value) { protected static final boolean FIRST_MIXED_ROUND = Boolean.parseBoolean(System.getProperty("tests.first_round", "false")); protected static final Version UPGRADE_FROM_VERSION = Version.fromString(System.getProperty("tests.upgrade_from_version")); + protected static IndexVersion getOldClusterIndexVersion() { + var version = UPGRADE_FROM_VERSION; + if (version.equals(org.elasticsearch.Version.CURRENT)) { + return IndexVersion.current(); + } else { + assertThat("Index version needs to be added to rolling test parameters", version, lessThan(org.elasticsearch.Version.V_8_11_0)); + return IndexVersion.fromId(version.id); + } + } + @Override protected final boolean resetFeatureStates() { return false; @@ -41,6 +54,11 @@ protected final boolean preserveIndicesUponCompletion() { return true; } + @Override + protected final boolean preserveDataStreamsUponCompletion() { + return true; + } + @Override protected final boolean preserveReposUponCompletion() { return true; diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java index 74ba81e9555e2..588802fb50709 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/FeatureUpgradeIT.java @@ -94,7 +94,7 @@ public void testGetFeatureUpgradeStatus() throws Exception { .orElse(Collections.emptyMap()); assertThat(feature, aMapWithSize(4)); - assertThat(feature.get("minimum_index_version"), equalTo(Integer.toString(UPGRADE_FROM_VERSION.id))); + assertThat(feature.get("minimum_index_version"), equalTo(getOldClusterIndexVersion().toString())); if (UPGRADE_FROM_VERSION.before(TransportGetFeatureUpgradeStatusAction.NO_UPGRADE_REQUIRED_VERSION)) { assertThat(feature.get("migration_status"), equalTo("MIGRATION_NEEDED")); } else { diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TsdbIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TsdbIT.java new file mode 100644 index 0000000000000..19f24c97a47f8 --- /dev/null +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TsdbIT.java @@ -0,0 +1,305 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.upgrades; + +import org.elasticsearch.Version; +import org.elasticsearch.client.Request; +import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.common.time.FormatNames; +import org.elasticsearch.test.rest.ObjectPath; + +import java.io.IOException; +import java.time.Instant; +import java.util.Map; + +import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class TsdbIT extends AbstractRollingTestCase { + + private static final String TEMPLATE = """ + { + "settings":{ + "index": { + "mode": "time_series" + } + }, + "mappings":{ + "dynamic_templates": [ + { + "labels": { + "path_match": "pod.labels.*", + "mapping": { + "type": "keyword", + "time_series_dimension": true + } + } + } + ], + "properties": { + "@timestamp" : { + "type": "date" + }, + "metricset": { + "type": "keyword", + "time_series_dimension": true + }, + "k8s": { + "properties": { + "pod": { + "properties": { + "uid": { + "type": "keyword", + "time_series_dimension": true + }, + "name": { + "type": "keyword" + }, + "ip": { + "type": "ip" + }, + "network": { + "properties": { + "tx": { + "type": "long" + }, + "rx": { + "type": "long" + } + } + } + } + } + } + } + } + } + } + """; + private static final String BULK = + """ + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507","ip": "10.10.55.1", "network": {"tx": 2001818691, "rx": 802133794}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "hamster", "uid":"947e4ced-1786-4e53-9e0c-5c447e959508","ip": "10.10.55.1", "network": {"tx": 2005177954, "rx": 801479970}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "cow", "uid":"947e4ced-1786-4e53-9e0c-5c447e959509","ip": "10.10.55.1", "network": {"tx": 2006223737, "rx": 802337279}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "rat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959510","ip": "10.10.55.2", "network": {"tx": 2012916202, "rx": 803685721}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9","ip": "10.10.55.3", "network": {"tx": 1434521831, "rx": 530575198}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "tiger", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea10","ip": "10.10.55.3", "network": {"tx": 1434577921, "rx": 530600088}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "lion", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876e11","ip": "10.10.55.3", "network": {"tx": 1434587694, "rx": 530604797}}}} + {"create": {}} + {"@timestamp": "$now", "metricset": "pod", "k8s": {"pod": {"name": "elephant", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876eb4","ip": "10.10.55.3", "network": {"tx": 1434595272, "rx": 530605511}}}} + """; + + private static final String DOC = """ + { + "@timestamp": "$time", + "metricset": "pod", + "k8s": { + "pod": { + "name": "dog", + "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", + "ip": "10.10.55.3", + "network": { + "tx": 1434595272, + "rx": 530605511 + } + } + } + } + """; + + public void testTsdbDataStream() throws Exception { + assumeTrue( + "Skipping version [" + 
UPGRADE_FROM_VERSION + "], because TSDB was GA-ed in 8.7.0", + UPGRADE_FROM_VERSION.onOrAfter(Version.V_8_7_0) + ); + String dataStreamName = "k8s"; + if (CLUSTER_TYPE == ClusterType.OLD) { + final String INDEX_TEMPLATE = """ + { + "index_patterns": ["$PATTERN"], + "template": $TEMPLATE, + "data_stream": { + } + }"""; + // Add composable index template + String templateName = "1"; + var putIndexTemplateRequest = new Request("POST", "/_index_template/" + templateName); + putIndexTemplateRequest.setJsonEntity(INDEX_TEMPLATE.replace("$TEMPLATE", TEMPLATE).replace("$PATTERN", dataStreamName)); + assertOK(client().performRequest(putIndexTemplateRequest)); + + performOldClusterOperations(templateName, dataStreamName); + } else if (CLUSTER_TYPE == ClusterType.MIXED) { + performMixedClusterOperations(dataStreamName); + } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { + performUpgradedClusterOperations(dataStreamName); + } + } + + public void testTsdbDataStreamWithComponentTemplate() throws Exception { + assumeTrue( + "Skipping version [" + UPGRADE_FROM_VERSION + "], because TSDB was GA-ed in 8.7.0 and bug was fixed in 8.11.0", + UPGRADE_FROM_VERSION.onOrAfter(Version.V_8_7_0) && UPGRADE_FROM_VERSION.before(Version.V_8_11_0) + ); + String dataStreamName = "template-with-component-template"; + if (CLUSTER_TYPE == ClusterType.OLD) { + final String COMPONENT_TEMPLATE = """ + { + "template": $TEMPLATE + } + """; + var putComponentTemplate = new Request("POST", "/_component_template/1"); + String template = TEMPLATE.replace("\"time_series\"", "\"time_series\", \"routing_path\": [\"k8s.pod.uid\"]"); + putComponentTemplate.setJsonEntity(COMPONENT_TEMPLATE.replace("$TEMPLATE", template)); + assertOK(client().performRequest(putComponentTemplate)); + final String INDEX_TEMPLATE = """ + { + "index_patterns": ["$PATTERN"], + "composed_of": ["1"], + "data_stream": { + } + }"""; + // Add composable index template + String templateName = "2"; + var putIndexTemplateRequest = new Request("POST", "/_index_template/" + templateName); + putIndexTemplateRequest.setJsonEntity(INDEX_TEMPLATE.replace("$PATTERN", dataStreamName)); + assertOK(client().performRequest(putIndexTemplateRequest)); + + performOldClusterOperations(templateName, dataStreamName); + } else if (CLUSTER_TYPE == ClusterType.MIXED) { + performMixedClusterOperations(dataStreamName); + } else if (CLUSTER_TYPE == ClusterType.UPGRADED) { + performUpgradedClusterOperations(dataStreamName); + + var dataStreams = getDataStream(dataStreamName); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.name"), equalTo(dataStreamName)); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.generation"), equalTo(2)); + String firstBackingIndex = ObjectPath.evaluate(dataStreams, "data_streams.0.indices.0.index_name"); + { + var indices = getIndex(firstBackingIndex); + var escapedBackingIndex = firstBackingIndex.replace(".", "\\."); + assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".data_stream"), equalTo(dataStreamName)); + assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.mode"), nullValue()); + String startTime = ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.time_series.start_time"); + assertThat(startTime, nullValue()); + String endTime = ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.time_series.end_time"); + assertThat(endTime, nullValue()); + } + String secondBackingIndex = ObjectPath.evaluate(dataStreams, "data_streams.0.indices.1.index_name"); + { + var
indices = getIndex(secondBackingIndex); + var escapedBackingIndex = secondBackingIndex.replace(".", "\\."); + assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".data_stream"), equalTo(dataStreamName)); + assertThat(ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.mode"), equalTo("time_series")); + String startTime = ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.time_series.start_time"); + assertThat(startTime, notNullValue()); + String endTime = ObjectPath.evaluate(indices, escapedBackingIndex + ".settings.index.time_series.end_time"); + assertThat(endTime, notNullValue()); + } + } + } + + private void performUpgradedClusterOperations(String dataStreamName) throws Exception { + ensureGreen(dataStreamName); + var rolloverRequest = new Request("POST", "/" + dataStreamName + "/_rollover"); + assertOK(client().performRequest(rolloverRequest)); + + var dataStreams = getDataStream(dataStreamName); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.name"), equalTo(dataStreamName)); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.generation"), equalTo(2)); + String firstBackingIndex = ObjectPath.evaluate(dataStreams, "data_streams.0.indices.0.index_name"); + String secondBackingIndex = ObjectPath.evaluate(dataStreams, "data_streams.0.indices.1.index_name"); + assertThat(secondBackingIndex, backingIndexEqualTo(dataStreamName, 2)); + indexDoc(dataStreamName); + assertSearch(dataStreamName, 10); + closeIndex(firstBackingIndex); + closeIndex(secondBackingIndex); + openIndex(firstBackingIndex); + openIndex(secondBackingIndex); + assertBusy(() -> { + try { + assertSearch(dataStreamName, 10); + } catch (Exception e) { + throw new AssertionError(e); + } + }); + } + + private static void performMixedClusterOperations(String dataStreamName) throws IOException { + ensureHealth(dataStreamName, request -> request.addParameter("wait_for_status", "yellow")); + if (FIRST_MIXED_ROUND) { + indexDoc(dataStreamName); + } + assertSearch(dataStreamName, 9); + } + + private static void performOldClusterOperations(String templateName, String dataStreamName) throws IOException { + var bulkRequest = new Request("POST", "/" + dataStreamName + "/_bulk"); + bulkRequest.setJsonEntity(BULK.replace("$now", formatInstant(Instant.now()))); + bulkRequest.addParameter("refresh", "true"); + var response = client().performRequest(bulkRequest); + assertOK(response); + var responseBody = entityAsMap(response); + assertThat("errors in response:\n " + responseBody, responseBody.get("errors"), equalTo(false)); + + var dataStreams = getDataStream(dataStreamName); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams"), hasSize(1)); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.name"), equalTo(dataStreamName)); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.generation"), equalTo(1)); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.template"), equalTo(templateName)); + assertThat(ObjectPath.evaluate(dataStreams, "data_streams.0.indices"), hasSize(1)); + String firstBackingIndex = ObjectPath.evaluate(dataStreams, "data_streams.0.indices.0.index_name"); + assertThat(firstBackingIndex, backingIndexEqualTo(dataStreamName, 1)); + assertSearch(dataStreamName, 8); + } + + private static void indexDoc(String dataStreamName) throws IOException { + var indexRequest = new Request("POST", "/" + dataStreamName + "/_doc"); + indexRequest.addParameter("refresh", "true"); + indexRequest.setJsonEntity(DOC.replace("$time",
formatInstant(Instant.now()))); + var response = client().performRequest(indexRequest); + assertOK(response); + } + + private static void assertSearch(String dataStreamName, int expectedHitCount) throws IOException { + var searchRequest = new Request("GET", dataStreamName + "/_search"); + var response = client().performRequest(searchRequest); + assertOK(response); + var responseBody = entityAsMap(response); + assertThat(ObjectPath.evaluate(responseBody, "hits.total.value"), equalTo(expectedHitCount)); + } + + private static String formatInstant(Instant instant) { + return DateFormatter.forPattern(FormatNames.STRICT_DATE_OPTIONAL_TIME.getName()).format(instant); + } + + private static Map<String, Object> getDataStream(String dataStreamName) throws IOException { + var getDataStreamsRequest = new Request("GET", "/_data_stream/" + dataStreamName); + var response = client().performRequest(getDataStreamsRequest); + assertOK(response); + return entityAsMap(response); + } + + private static Map<String, Object> getIndex(String indexName) throws IOException { + var getIndexRequest = new Request("GET", "/" + indexName + "?human"); + var response = client().performRequest(getIndexRequest); + assertOK(response); + return entityAsMap(response); + } + +} diff --git a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java index 068747d5a4824..0f829f20fe3c4 100644 --- a/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java +++ b/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/UpgradeClusterClientYamlTestSuiteIT.java @@ -40,6 +40,11 @@ protected boolean preserveTemplatesUponCompletion() { return true; } + @Override + protected boolean preserveDataStreamsUponCompletion() { + return true; + } + public UpgradeClusterClientYamlTestSuiteIT(ClientYamlTestCandidate testCandidate) { super(testCandidate); } diff --git a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_vector_search.yml b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_vector_search.yml index b471fa56a47a5..11e9fdc2cca95 100644 --- a/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_vector_search.yml +++ b/qa/rolling-upgrade/src/test/resources/rest-api-spec/test/old_cluster/30_vector_search.yml @@ -11,6 +11,7 @@ bdv: type: dense_vector dims: 3 + index: false knn: type: dense_vector dims: 3 @@ -125,6 +126,7 @@ bdv: type: dense_vector element_type: byte + index: false dims: 3 knn: type: dense_vector diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml index f34aef9b83321..b4851ddd92469 100644 --- a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/40_knn_search.yml @@ -368,3 +368,123 @@ setup: filter: {"term": {"name": "cow.jpg"}} - length: {hits.hits: 0} +--- +"kNN search with dot-product auto-normalized": + - skip: + features: close_to + version: ' - 8.10.99' + reason: 'dense_vector auto-normalized dot_product in 8.11' + + - do: + indices.create: + index: test_dot_product + body: + mappings: + properties: + name: + type: keyword + dot_product_vector: + type: dense_vector + dims: 5 + index: true + similarity: dot_product + cosine_vector: +
type: dense_vector + dims: 5 + index: true + similarity: cosine + + - do: + index: + index: test_dot_product + id: "1" + body: + name: cow.jpg + dot_product_vector: [ 230.0, 300.33, -34.8988, 15.555, -200.0 ] + cosine_vector: [ 230.0, 300.33, -34.8988, 15.555, -200.0 ] + + - do: + index: + index: test_dot_product + id: "2" + body: + name: moose.jpg + dot_product_vector: [ -0.5, 100.0, -13, 14.8, -156.0 ] + cosine_vector: [ -0.5, 100.0, -13, 14.8, -156.0 ] + + - do: + index: + index: test_dot_product + id: "3" + body: + name: rabbit.jpg + dot_product_vector: [ 0.5, 111.3, -13.0, 14.8, -156.0 ] + cosine_vector: [ 0.5, 111.3, -13.0, 14.8, -156.0 ] + + - do: + indices.refresh: { } + + - do: + search: + index: test_dot_product + body: + fields: [ "name" ] + knn: + field: dot_product_vector + query_vector: [-0.5, 90.0, -10, 14.8, -156.0] + k: 2 + num_candidates: 3 + + - match: {hits.total.value: 2} + - match: {hits.hits.0._id: "2"} + - set: { hits.hits.0._score: score_0 } + - match: {hits.hits.0.fields.name.0: "moose.jpg"} + - match: {hits.hits.1._id: "3"} + - set: { hits.hits.1._score: score_1 } + - match: {hits.hits.1.fields.name.0: "rabbit.jpg"} + + - do: + search: + index: test_dot_product + body: + fields: [ "name" ] + knn: + field: cosine_vector + query_vector: [ -0.5, 90.0, -10, 14.8, -156.0 ] + k: 2 + num_candidates: 3 + + - match: {hits.total.value: 2} + - match: {hits.hits.0._id: "2"} + - close_to: { hits.hits.0._score: { value: $score_0, error: 0.00001 } } + - match: {hits.hits.0.fields.name.0: "moose.jpg"} + - match: {hits.hits.1._id: "3"} + - close_to: { hits.hits.1._score: { value: $score_1, error: 0.00001 } } + - match: {hits.hits.1.fields.name.0: "rabbit.jpg"} +--- +"kNN search fails with non-normalized dot-product in older versions": + - skip: + version: '8.10.99 - ' + reason: 'dense_vector auto-normalized dot_product in 8.11' + + - do: + indices.create: + index: test_failing_dot_product + body: + mappings: + properties: + dot_product_vector: + type: dense_vector + dims: 5 + index: true + similarity: dot_product + + - do: + catch: bad_request + index: + index: test_failing_dot_product + id: "1" + body: + name: cow.jpg + dot_product_vector: [ 230.0, 300.33, -34.8988, 15.555, -200.0 ] + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml new file mode 100644 index 0000000000000..767e898792f20 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/60_dense_vector_dynamic_mapping.yml @@ -0,0 +1,494 @@ +setup: + - skip: + version: ' - 8.10.99' + reason: 'Dynamic mapping of floats to dense_vector was added in 8.11' + +--- +"Fields with float arrays below the threshold still map as float": + + - do: + index: + index: test-too-short-still-float + id: "1" + refresh: true + body: + my_field: [ 230.0, 300.33, -34.8988, 15.555, -200.0 ] + + - do: + indices.get_mapping: + index: test-too-short-still-float + + - match: { test-too-short-still-float.mappings.properties.my_field.type: float } + +--- +"Fields with float arrays above the threshold still map as float": + + - do: + index: + index: test-too-big-still-float + id: "1" + refresh: true + body: + my_field: [ + 
-457.1953,259.6788,271.9127,-26.8833,403.0915,-56.9197,-445.8869,-108.8167,417.8988,13.4232,-281.765,-405.8573,262.7831,-279.493,328.5591,-453.3941,-116.0368,435.4734,-439.0927,-332.9565,355.4955,324.9878,33.3519,-165.0182,188.1811,467.3455,185.1057,-233.8598,-17.6827,283.4271,-329.1247,-402.9721,404.7866,-358.7031,-267.4074,441.8363,320.2389,-128.0179,339.544,196.2018,-60.2688,336.0228,-440.1943,318.6882,-158.2596,277.0925,-487.4971,-338.9865,-275.716,136.8547,-253.6206,-40.2807,-357.0971,188.0344,-203.0674,449.9618,-223.2508,468.1441,302.4002,-65.0044,342.4431,205.6774,-118.636,-29.9706,183.9825,223.956,314.0691,137.0129,-8.0452,-15.131,-269.8643,-12.691,228.9777,-147.8384,-347.1117,-283.1905,459.2004,296.1321,-483.1799,414.3423,383.0187,-408.5525,-286.8169,482.5853,9.5232,-459.4968,-333.2521,109.0969,129.5107,43.4369,455.8283,-4.0423,-318.5019,339.1641,416.3581,-309.0429,84.2325,-355.8753,264.7671,43.8922,-298.6039,412.4413,19.4198,-251.279,-191.157,-478.2058,251.5709,-178.9633,479.293,188.399,380.9755,268.6575,120.3467,-322.0305,-255.4894,-377.515,56.9153,-133.9486,156.2546,-428.9581,-54.994,28.2146,158.7121,-426.7307,491.0086,-150.7205,-233.1005,244.5174,45.911,-406.1181,233.1636,175.9334,414.2805,421.7396,-322.8029,-252.2412,35.7622,318.5223,-141.5121,-375.4407,380.3081,222.1228,443.7844,367.377,-202.9594,-493.6231,-184.2242,-253.9838,463.1952,-416.3887,252.0867,-63.5317,411.0727,98.6261,330.7369,363.5685,-498.1848,-413.7246,-2.5996,-238.3547,-355.6041,-303.698,43.6266,383.1105,-72.3066,274.7491,321.9322,220.9543,-30.5578,400.0891,-181.7069,-386.4403,497.2206,-408.9611,138.485,-133.5666,-340.2569,-223.6313,270.884,-215.9399,74.3931,-244.1364,353.4219,-156.9905,488.3148,96.352,401.8525,-468.8344,129.9715,-27.1953,-168.631,187.7049,-336.5255,331.0652,204.3538,36.0182,366.8502,-468.6579,478.1409,-332.6136,-281.8499,63.7165,-458.8161,14.8894,-145.6397,267.1499,85.2025,326.3764,-419.6361,-133.9626,102.0618,443.3099,-207.9032,132.7032,234.001,-26.0754,105.6478,174.1252,-403.3511,-164.9714,-262.9344,-58.9668,357.6414,355.7508,-331.8443,153.5733,417.5712,260.7394,-150.1053,-435.6525,-364.1558,328.6183,-270.0863,107.1746,345.7998,480.8749,206.3896,-498.237,495.0835,481.9384,418.5571,-246.5213,-363.7304,311.7076,-53.1664,-297.3839,122.3105,-13.9226,-145.9754,-189.1748,460.9375,194.5417,-28.1346,-261.2177,-88.8396,-254.6407,-465.3148,-169.5377,24.3113,-116.2323,-420.3526,317.2107,-231.6227,-270.8239,387.8598,412.4251,428.1373,308.2044,275.2082,402.3663,-209.9843,-492.7269,225.1948,326.469,207.3557,-131.7677,371.9408,-139.3098,324.205,-126.6204,-335.0853,-248.2587,-344.907,307.2109,-441.3296,-318.027,414.6535,172.0537,-280.4991,331.0475,-158.0178,-285.1951,12.3632,149.9347,282.8302,-91.5624,-180.6097,496.0881,368.2567,357.6875,-194.2106,48.9213,-479.2956,-165.139,238.7811,302.7007,297.2805,208.7099,-5.5755,-85.7911,-358.1111,344.6131,415.7199,-219.1525,490.5003,-46.0096,498.2818,-91.8067,384.0104,396.1107,408.2827,-5.3919,-333.7992,-168.985,273.72,359.7125,227.7621,158.3406,-366.9722,3.7709,27.2728,71.9754,269.5792,-365.281,117.9152,-184.3682,356.9013,-142.6579,-496.7598,122.0194,89.1247,4.1914,-81.9905,465.0841,115.4727,169.6116,-199.9951,-223.3149,-447.3022,11.831,320.2368,105.1316,344.2462,8.6333,62.2285,-70.3944,-284.6694,-482.4229,-448.1569,-237.7858,222.3921,-172.1386,-312.5756,-390.0565,398.951,119.9784,-419.6537,121.3186,481.3011,-181.6662,-56.0219,424.1359,7.1461,138.8567,-307.0606,334.066,254.0897,473.7227,45.5936,133.7268,49.5334,-283.3406,179.4466,105.6191,-30.4162,271.5774,6.1156,110
.4732,286.4325,13.3431,494.0139,-371.7624,283.3652,272.0558,-302.343,122.7245,-463.9261,299.9807,282.4502,-262.4911,183.4289,222.7474,-229.5973,141.6188,262.5468,278.1155,-331.0891,-393.6027,-230.1461,201.6657,-93.3604,-395.8877,-125.2013,-222.973,368.3759,234.6628,-28.6809,-151.0703,432.0315,253.1214,430.7065,-143.6963,499.84,85.1683,280.4354,196.6013,139.0476,120.8148,-398.8155,-335.5504,229.0516,403.8604,-383.9868,-79.975,-152.77,220.4036,135.0355,238.2176,-242.3085,-177.0743,381.8202,411.167,378.0153,456.5976,364.013,24.2316,-395.4659,-210.2581,138.7539,479.7398,-291.7797,-123.0491,188.9817,42.8931,-354.4479,358.853,-43.6168,-190.6656,-103.3037,47.8915,-358.5402,374.9758,493.9951,-427.2376,-119.1142,-453.2975,-326.2696,-212.8273,-142.2931,-179.795,355.77,-156.2903,331.2006,451.9252,185.2944,-96.1941,173.0447,345.2744,43.0151,381.7845,-143.4125,84.654,-208.7053,-293.141,333.6349,-80.472,-376.9817,214.6298,-43.0931,-254.7834,-421.6961,-368.844,467.5544,-418.61,-66.6824,-350.2671,348.8241,252.3495,41.8677,-128.869,90.0391,-136.7405,-136.7822,489.8074,-396.8204,63.8355,323.9557,-83.6674,451.263,152.8955,-291.7497,410.0787,-299.7468,51.34,-298.6066,-58.853,325.911,-281.9541,-15.3457,299.1325,-347.4959,388.407,343.1096,28.1816,24.3013,-111.3312,190.5583,279.9848,-479.8894,123.2182,233.8425,-466.2128,-134.7122,217.8674,432.9523,-186.799,-477.2512,-223.5514,64.274,141.5251,-161.2187,150.2791,-228.1087,81.172,451.0879,-230.3818,-304.9398,402.1081,199.1266,275.3423,-123.9548,-21.1815,-384.544,446.9626,208.9692,-337.4827,-58.1011,344.2642,230.2868,44.9176,245.9885,-284.1875,-351.6104,108.1289,459.649,191.4334,53.591,136.7139,10.5912,-15.8411,62.8305,448.5256,194.7705,-356.3214,84.4996,-133.2502,-358.6308,262.7949,219.8741,-355.3985,468.2922,243.7227,-408.3166,188.6111,-221.7264,-286.8234,-340.3046,-224.5375,332.2615,73.2788,-24.7857,-485.2204,-136.7196,-162.9693,92.6017,-99.611,-186.5203,495.5483,240.8051,409.6493,-58.1321,-154.1239,-335.9719,-82.4408,-471.3057,-43.373,301.0884,-96.6359,-236.6906,435.7313,-227.7263,-406.8904,-392.3187,169.0043,-371.0852,-271.3652,-57.4466,-196.8455,52.741,361.7395,-117.8599,190.5339,276.6457,-321.9851,425.881,-473.2662,-74.2968,221.3612,-465.4429,181.723,-78.4508,21.6152,148.8107,-166.1687,-281.6391,-462.3636,-420.5255,-161.4143,98.8383,-374.5345,-366.2851,187.1506,-405.1865,239.4847,-246.8352,33.1748,-344.1211,477.9759,-294.1354,-359.5015,-44.8454,151.7072,-22.7324,-260.3293,99.1414,-20.5536,173.3766,-422.6692,458.3853,-199.7898,-236.3929,365.2599,-66.4191,388.3472,283.0336,-268.9463,269.5704,360.9679,-322.102,-407.0705,-93.0994,338.9108,-189.1359,-216.9102,-249.0153,122.6058,-254.8318,-112.2771,-279.0506,-168.4431,392.888,394.7607,468.0544,340.1852,-293.1288,-8.2912,-419.2608,323.3382,-93.8793,-242.0672,427.7716,-441.6906,128.3229,424.4679,-71.8586,134.5411,-74.5205,18.4141,17.7277,126.9123,-137.6119,33.3783,222.9912,-279.3582,89.1226,-90.031,12.7221,98.7767,-80.2372,-485.9212,-481.6575,-325.9729,318.8005,-433.786,-296.6337,421.6515,-27.2786,-445.2456,451.8876,-482.1014,-143.1098,186.1258,-90.2432,-297.7479,-351.0026,-423.7518,-219.6096,-269.2043,33.5767,-325.4335,392.4866,-418.243,112.5852,-248.1306,451.2154,-419.2995,154.5752,483.6323,-315.962,-196.872,406.1769,-356.9868,67.5251,-255.6475,103.5181,-450.4418,386.9518,456.4057,99.4591,-166.636,275.5374,200.4925,99.7623,292.6794,-422.3998,419.4837,-466.548,-462.8519,-381.4489,472.8356,-129.9563,441.4941,-376.1232,-114.1945,233.5531,313.6963,394.9503,-278.7558,350.7515,47.9427,220.7074,-178.9789,-346.0485,-128.566
5,8.9461,159.9838,-57.3637,351.9478,-65.9411,-258.1788,498.9494,-472.613,-428.5678,17.3981,-435.3682,-421.155,-54.9177,-490.2348,178.3777,-31.9618,-242.1805,362.3736,380.8179,446.4272,-23.9142,61.3588,-489.5704,363.6446,-186.1519,-351.8684,-322.2791,-226.0431,404.6996,203.9824,306.0958,234.0145,-180.4996,452.0633,257.171,-83.6197,-393.152,396.6934,32.156,-428.7645,183.7886,494.767,68.3905,278.9785,-40.4759,261.7298,236.5778,4.5577,-130.9582,433.2837,-298.1139,-107.9822,-196.8446,-121.1765,-292.5509,-246.4546,-258.6038,280.1334,-52.6511,483.2928,-185.7577,-75.3705,351.3411,179.1282,-479.3838,166.2733,-197.9043,282.6848,-50.4744,-492.7178,183.6435,-127.2379,483.646,433.0805,-228.5488,139.8314,-145.1337,-403.1749,306.2704,122.7149,479.6928,85.3866,108.095,-224.152,494.6848,-368.4504,-180.7579,61.7136,51.2045,-383.0103,-376.4816,-292.8217,-201.118,332.1516,425.2758,138.1284,-229.4302,432.9081,2.9898,-437.7631,-448.2151,129.9126,-170.2405,499.0396,-48.2137,363.8046,-423.2511,-28.0804,-267.826,-356.6288,-99.9371,-409.8465,170.4902,-269.2584,-277.4098,300.8819,-142.5889,339.0952,16.2275,-310.8646,201.0733,-495.5905,341.9279,-149.1184,-494.4928,-81.7343,209.9762,273.4892,380.3163,359.2424,-242.5,-42.1268,-303.9792,11.6018,361.5483,416.4178,10.3282,195.9796,148.8096,-60.9724,-205.5221,-145.4574,-341.5913,426.8996,-19.5843,60.6265,-133.4191,-139.8737,281.7465,461.2854,-270.8902,61.0182,-58.6791,-254.0193,-234.1206,-208.7334,39.7498,-14.337,-68.2319,-342.2756,403.6834,401.6122,-166.1637,47.3592,-325.7,274.5459,343.4873,328.3783,-370.1657,-122.8967,-231.3182,122.6609,119.2685,-223.5437,-210.8076,116.5022,340.2814,256.1852,-217.3487,-150.9598,331.1343,-453.8182,-448.0842,-95.2475,-340.9942,-416.7835,-96.7226,-328.7212,-373.4337,472.2214,-484.522,-465.1583,330.0712,73.2052,-55.1266,-352.8984,341.0742,-230.4845,321.0752,236.2116,35.1902,75.3489,-469.4042,110.2036,35.1156,454.7224,103.0685,-221.7499,-23.6898,-259.2362,-110.509,-261.0039,219.2391,-139.9404,155.7723,377.9713,434.0318,-365.1397,459.1471,-318.5774,323.4256,194.325,-311.9529,-153.9019,-346.5811,76.4069,443.2121,-199.407,495.6636,-138.5213,-145.3432,-151.7758,-365.3547,263.6507,-491.1686,-183.5585,-12.6044,318.5346,-443.8639,-179.0338,477.9093,-355.5118,-423.0035,-229.1166,-96.7782,-479.2384,192.9085,223.3407,-302.9472,297.3847,477.584,-297.5958,168.6023,-80.6912,-89.8717,87.1476,-129.7807,346.5576,-253.9729,-399.6858,-389.5785,35.1648,-180.451,-49.6084,83.9582,-185.2329,97.283,195.5249,-91.6969,199.202,-449.792,333.4825,-113.7558,443.434,394.3587,-94.9074,71.2092,-251.1774,-85.047,-46.4004,20.2595,341.1073,-91.2527,86.3775,303.1247,-336.9011,343.9894,-384.1261,154.4411,-465.2493,-63.3249,488.0231,348.6725,458.2093,322.401,220.2532,283.3734,-386.4252,-256.5262,-87.2205,96.8199,47.6908,-399.6307,214.7716,-19.9177,-458.513,-194.3218,-320.5342,-275.857,-301.6955,-84.9038,358.3475,-88.9271,499.7721,-161.7403,355.4894,313.6211,-176.1703,61.8427,107.603,-176.063,-426.5408,292.3612,58.3331,-115.8853,471.4131,-76.4815,-309.6263,361.4518,192.4763,-145.7968,256.3888,133.335,-474.0901,-366.9793,-495.223,457.2366,170.056,285.0152,89.8213,225.2251,354.1822,-298.374,-332.9164,-55.2409,306.9283,25.9392,218.0624,7.5085,-151.8768,-155.4932,6.0001,201.4506,-259.9874,485.1078,-362.8516,-230.1434,-398.2512,243.0012,32.302,-197.91,144.1195,-89.4196,-44.0399,-371.7866,227.6007,492.7526,499.3824,162.2475,279.0325,177.0781,341.0137,199.6009,108.1678,312.2319,-211.5001,-92.675,357.0513,-337.924,-348.984,-350.3677,173.3473,-193.7346,-318.5609,-2.0928,46.6287,-346.8513,36
.634,-277.4949,-149.325,481.1378,370.3864,-139.6689,-332.2805,48.0292,109.8363,494.6994,373.6992,495.7442,400.4998,-26.2276,-308.7669,188.9497,257.9182,-116.6944,269.8932,197.005,123.1139,-356.2058,485.1982,-4.0119,397.8434,-204.67,-494.5133,-414.1299,142.1512,-36.5446,390.0718,6.9876,263.1216,457.5598,89.6086,-266.3804,17.3457,88.8182,236.6271,81.175,-170.2249,-5.7664,422.7852,180.3349,-135.2642,149.2285,-70.6607,-46.169,-389.3313,230.6125,388.4853,-438.3426,111.8034,300.0416,37.5604,-437.3868,-114.1336,312.7777,-99.1161,-312.9015,-147.3787,-434.0536,19.5034,141.706,-281.4504,-208.9608,281.4619,-361.0596,-464.2757,77.8205,232.5575,165.4104,424.8738,124.5555,342.038,86.7543,278.0216,311.2686,337.834,-90.0545,-210.1143,-488.4095,-80.7535,92.3731,-122.622,-288.0571,1.7285,-5.2998,100.0717,-395.0571,-477.5587,-160.5642,-119.4214,-232.233,415.7276,-204.3216,-436.7766,-103.4644,-427.0939,-31.0927,-440.2919,120.5971,-223.3623,-199.0988,304.8697,432.5731,-231.5791,-397.696,306.4134,330.1018,32.4345,-175.719,464.6091,-291.5686,300.1631,-167.4592,238.9574,104.5893,-187.2215,-294.0111,-361.9094,480.6847,-304.2133,-448.7144,67.7235,-255.9669,254.7379,464.5465,6.8909,-368.7554,337.5993,39.1928,-376.0625,433.4224,-109.1488,341.7731,377.843,446.839,-192.283,251.1592,437.6812,-478.3409,345.7668,377.965,125.6188,-462.0904,-235.3324,316.8892,-460.7371,248.9306,418.7082,-333.7257,-104.5062,-408.1356,148.6624,-158.4929,-477.0664,80.4926,-214.6292,211.3377,322.7854,-312.851,403.0215,-213.3089,-71.3355,-276.1068,-293.0902,-277.4559,54.2176,-119.1285,-479.4361,-492.6072,8.3732,42.4988,-5.576,-198.6151,-357.0952,-331.5667,186.6195,317.3075,201.267,-37.1731,-278.3164,-467.7796,-163.3909,-117.305,-233.9266,277.7969,181.9723,178.8292,-168.7152,-436.041,171.345,369.0302,423.7144,434.0961,-428.1816,23.7334,-136.6735,-222.4486,180.8461,57.5968,129.2984,127.1866,-109.3928,-143.6253,-385.9948,127.9867,-8.8096,-239.844,66.6491,-50.7301,-309.1113,-474.6991,212.1767,-444.4596,-211.3601,351.3551,335.0507,-128.6226,-98.5249,-257.454,489.8014,-378.8622,311.0304,-4.9107,362.7586,-458.8825,373.2779,-103.29,-5.6216,122.0183,76.9731,17.8771,289.8893,-56.4338,375.9665,-83.9991,440.0823,142.2309,-471.0813,-59.4847,-400.4217,91.4892,374.4009,486.8697,414.5213,-0.3535,-278.2345,-231.206,-238.479,389.3143,-276.9742,-33.9869,349.1201,127.3928,-410.7213,337.3789,36.4048,333.4291,-12.4075,483.8778,311.4489,-74.0628,-379.6051,463.234,157.5614,-140.9455,120.7926,-161.2341,194.162,-412.6181,-9.1258,-194.5065,441.1572,255.5455,-73.8086,-119.4013,-486.4792,-27.4352,98.9738,-119.002,-75.5589,261.7675,156.0993,89.6457,-190.6318,429.9325,195.9536,-172.6155,-22.7976,438.9412,-246.4661,447.7281,434.5346,405.8957,217.3324,392.6129,-158.604,15.8632,483.0414,334.7693,-307.2482,302.1267,-7.4125,3.8081,-405.7316,377.5069,51.2307,235.0695,269.737,-389.3487,186.4225,-36.8521,401.2051,-59.0378,-190.8023,-182.8076,-362.6136,-124.8064,362.4142,45.3344,-330.1214,-162.5452,-434.4411,219.1143,-374.1038,364.5639,-268.582,-22.9247,-73.8849,-54.5258,-23.0882,167.9233,-181.9807,-207.1173,300.2193,206.5903,-72.013,-244.4396,-435.5389,10.3523,-435.3545,-138.8392,449.8426,-244.8971,229.7666,267.5225,-401.6021,466.3278,418.3623,-317.8205,28.5192,384.5628,-79.6177,469.4532,-395.1986,-353.4477,-93.6914,70.3999,-441.0627,-201.1221,141.2748,433.3389,82.413,-394.0046,-438.6836,453.4704,-160.6535,353.0374,-238.0377,236.5195,497.9019,202.9472,-421.6417,-382.042,84.6308,430.1599,-390.9918,-195.0401,255.6526,-86.5964,-491.667,-199.1557,-102.7114,474.877,-292.9154,-77.3163,143.5
625,58.8126,-284.8908,-457.6457,212.5317,480.4032,-324.0829,491.0165,-494.7934,267.4311,-142.2401,-368.9058,-370.4955,498.803,-6.7377,-395.373,177.8868,306.9761,80.4185,-239.1253,-435.1349,7.6298,-157.6242,348.6095,475.7845,317.7116,-353.7336,-40.2881,353.7096,-60.9783,-385.5816,243.8071,-398.8341,62.343,340.0251,-24.8105,-343.4186,189.6737,-467.3026,104.7127,159.5467,-482.5496,71.6951,-163.5304,-321.8438,185.2875,-331.6885,-102.6817,-242.7548,-259.4407,220.6898,231.6571,-297.1145,-186.9472,-316.9286,-36.2392,-293.964,296.3878,467.7409,-277.6389,493.2143,417.1244,12.241,-343.7893,-33.7207,457.2978,-248.9726,-409.5439,-92.4779,-173.7584,400.8483,59.7439,13.3265,-175.617,37.333,-307.6469,-82.3687,332.578,-412.0079,144.7037,350.6506,423.3235,-53.2147,67.9581,-447.3845,-461.0187,371.1702,386.2045,352.2722,-119.098,123.9178,-52.0535,465.2626,474.0272,402.9961,491.4763,-33.1373,-228.8607,-383.3299,408.8192,-275.155,489.8633,-349.5073,346.9781,129.3929,282.1868,-77.3384,277.3026,412.3277,263.6705,473.3756,-437.9988,114.1686,-452.3331,-167.8898,-193.6217,444.6168,-354.3223,-238.0967,432.0883,-349.7249,-42.3659,-304.7343,296.2192,-136.5386,-121.7774,450.4678,140.5384,-450.8993,93.8942,-54.4945,498.521,-461.7182,111.5166,-397.6007,-397.959,-20.9331,-19.7068,78.551,161.9472,-24.8682,-434.4537,102.9447,214.298,-494.3813,211.6782,64.8196,372.6962,-399.8337,114.5476,-191.0045,-369.6465,-391.7201,-204.9951,-201.7654,475.898,-262.3247,-348.6974,79.4062,-112.4281,-102.266,67.3008,335.485,68.4289,-433.9104,-392.963,-73.3788,276.5766,-105.2219,422.6201,192.915,-388.3541,242.3915,479.5633,42.5998,259.6189,-316.5861,390.1121,-216.0274,-373.296,103.7169,321.9107,19.0023,487.2627,151.6922,276.7424,461.6928,24.4758,133.263,-47.289,-413.9538,435.2414,-466.9724,-270.6602,238.9442,-110.5389,403.5151,-395.4393,-208.2219,-53.0773,-26.5792,-387.6534,-120.5566,143.2237,-305.3778,442.0665,417.9523,460.3337,254.8689,-375.9436,-101.0153,232.4727,-35.5285,-470.3007,-423.9161,-108.9997,-29.6555,233.1043,240.4766,404.763,276.8465,-354.4058,74.0678,-343.244,332.9786,361.2964,-322.0828,-41.1861,-122.8074,-299.5682,-481.218,-157.3994,310.6317,-261.176,310.2644,-239.9855,255.1004,-311.3351,437.9486,78.1311,-133.9261,-176.2119,45.9943,492.3169,266.5795,16.8553,-470.9413,-331.2718,218.4122,369.7118,-179.3201,-165.7277,-87.9832,357.6499,-261.0345,442.1609,113.2997,-112.5643,481.2426,-365.4958,400.5374,-395.085,303.8103,-292.0268,167.0744,-199.013,174.9283,498.3585,-337.466,303.9078,-326.0901,-331.7143,6.7189,-277.1371,-204.9097,-313.4259,-462.7296,437.8485,267.2872,157.752,143.8784,60.1304,-492.991,326.0132,-123.3415,390.8461,-293.0175,483.4759,240.4338,271.6879,483.4801,391.2687,238.3995,-246.607,-411.7722,-257.9864,238.0949,494.3455,-489.0838,-26.7283,317.1161,-264.0242,-16.6819,-141.4839,429.101,252.2336,-325.1541,471.044,452.352,7.4546,343.3004,-336.4424,489.6317,307.1831,-139.2075,153.572,-332.5617,-361.892,110.6459,-384.8117,-423.0834,-277.9929,44.5303,167.9458,364.1204,-222.5008,-148.7923,198.4694,-74.0043,-458.4327,-227.5346,272.4441,-477.2587,303.1998,72.3129,112.9422,-98.2577,296.903,-489.0569,-461.4503,-381.6239,-440.6212,-354.1834,356.1583,-220.6533,192.5295,-409.0818,-264.2973,498.2192,-306.675,-313.6103,-124.9266,-436.5922,297.9051,121.9351,425.3888,-283.9925,-360.441,-347.4517,8.6814,477.4163,-344.6926,-311.574,-199.9541,-272.862,-360.8642,-306.0856,-218.9529,200.1938,-187.9337,-149.341,-431.5156,-135.3958,131.1299,262.0532,-210.162,353.4392,-249.2969,216.4223,499.6139,215.8176,-346.1569,177.2202,-173.1132,-466.9007,-3
10.9848,463.485,6.516,-334.8823,-282.7409,-375.2367,-127.4937,257.2427,384.9285,206.4053,-283.9167,369.6312,-325.1146,452.7523,-103.9792,-51.036,153.325,-344.1749,289.4824,109.8308,375.2284,-249.8481,367.8478,71.0143,471.6136,-265.6336,12.9061,-470.1288,-113.547,38.8925,-205.7232,418.6063,475.6095,-18.8731,-431.5545,-288.6452,-406.8928,79.4828,-152.1474,345.565,-200.8038,174.7789,379.2991,-385.1188,-217.6888,241.9077,-449.1824,467.832,186.0095,-82.8376,-450.7827,-32.2903,-288.132,169.8581,-275.3198,-388.1222,-431.3601,64.9652,368.9351,107.4999,408.8666,267.7858,-462.4349,-198.4615,378.1182,252.7529,-344.883,-364.0161,-124.6144,-222.8902,-103.7114,387.1701,-363.7944,-237.934,230.2082,-63.1276,-456.8188,361.9248,461.0643,160.8127,305.6079,81.2236,-322.0002,-273.4727,-356.9758,227.4751,278.5386,-10.8627,49.6988,-495.2527,428.0901,393.6169,-360.5547,-137.0244,26.962,-326.3379,-399.4972,449.7645,-238.7444,-69.8461,222.6126,-68.7657,132.7567,255.7355,-190.3762,271.6129,405.5764,115.8834,0.9645,331.1665,396.4585,217.4435,-323.6914,39.5915,282.4489,411.3888,-219.2131,240.8913,-109.5264,-438.3067,-157.3961,-180.7485,-258.9153,61.7008,483.4718,-386.0406,-499.1824,-90.2675,-358.5152,-79.3051,-97.4094,-91.7246,63.539,-307.0526,226.416,-454.475,-375.7449,300.532,409.7526,7.7042,-320.297,-244.9896,-282.6645,-414.9866,-331.4623,316.162,348.8361,-342.8609,477.2374,6.5636,-483.931,341.3556,498.2318,-46.3428,203.981,101.2793,128.4547,-285.068,56.5149,-407.6478,-151.4672,116.6673,-115.0498,-491.7974,-151.9475,474.7827,-288.4179,286.4447,-430.6331,-279.1458,318.721,-276.8375,157.9586,-9.2346,398.8374,380.2256,61.1557,13.0746,-80.139,-134.8798,-37.6466,-209.7381,236.1511,388.5629,-196.1123,-481.5887,327.8334,408.2074,479.1439,85.082,227.7623,250.2644,-47.8238,464.8471,-431.5099,489.9794,452.9999,-50.8695,-429.0862,-138.8555,-395.3346,391.3405,-249.4682,-280.6761,-460.5297,1.0129,199.1008,-97.4134,-235.0172,-466.1287,-302.7993,298.4108,-22.478,173.9936,122.8033,-235.0353,231.5057,-97.2265,-203.8224,457.6806,484.1385,-309.3619,-168.3588,-177.2797,-3.9408,-279.2997,104.4862,-139.4921,-450.2539,402.541,-437.1151,-337.4914,-200.3446,-164.484,-293.7216,471.7414,192.6153,233.1926,-122.8377,356.5476,450.1361,-400.0941,61.0466,441.7145,189.7192,-69.6348,252.5418,-246.5242,-344.0219,14.2904,87.2185,-119.2684,205.422,-374.4802,33.4042,81.2271,-2.5025,-138.6816,8.1989,-439.7698,-446.1887,-374.9012,160.9795,49.3705,72.7925,245.9454,-138.7558,11.9923,414.9421,5.9535,-142.9589,396.2571,-222.2068,-2.6172,-90.5871,346.7415,-337.3213,-372.4473,91.8271,310.6442,263.7468,-357.0433,-246.0827,25.4967,55.8069,-64.7183,-342.7375,-356.7083,70.0885,-79.026,-346.3906,206.2687,-440.6602,321.8775,223.3025,159.6939,292.4308,241.077,-219.0901,495.9946,0.3506,-166.4262,475.1836,-272.5527,118.8711,458.2456,353.3839,-82.5653,37.2834,-92.4387,146.5082,233.4743,-408.0537,-469.9263,148.8959,-324.352,498.608,-324.5319,-114.6779,-200.4192,404.8448,-289.7989,400.6151,-372.9065,359.7581,141.4237,-304.6837,314.3738,-302.4693,442.6138,-224.0818,270.1887,-477.1098,429.0239,264.1871,26.84,283.4518,129.5215,6.6673,-91.4464,75.821,261.5692,-403.0782,-213.9284,-356.8221,-232.4484,33.5696,99.1931,344.0097,187.4695,-264.0572,-199.6103,342.5485,187.058,31.5948,-275.4046,215.9846,425.1114,327.1992,437.8426,-281.2049,71.7953,393.346,-339.9023,-78.8502,314.1866,-120.7207,-416.0802,-327.1001,413.6143,-236.2051,247.1197,318.5011,-194.295,486.3421,409.0831,252.6212,-452.654,-215.7497,-464.1643,61.9033,66.4139,-425.8918,-401.3522,-395.1639,427.7052,-264.1728,131.9144,
258.4416,-442.2357,68.3167,441.5518,138.4774,470.7538,-14.6434,-436.2225,385.0708,286.1155,323.9014,137.4596,-352.5503,1.9307,-314.7656,449.5639,-468.3008,81.2499,487.4562,270.1387,-445.3627,460.1174,-205.2539,-32.6044,359.0438,-115.5841,-268.6624,-495.8554,-474.4781,337.9834,-281.4488,252.1636,-33.645,-26.6636,193.8834,287.2377,6.9748,414.4343,-211.7143,-23.0035,-226.5275,-400.285,-336.3935,28.1908,244.27,21.9938,-222.3759,-103.1418,464.7943,-256.0156,46.7511,-487.2509,-321.3631,479.2142,328.166,-481.2039,253.4962,100.2875,-399.98,-81.5868,289.7597,-318.7266,-264.2078,129.4063,407.6828,222.8346,370.0391,46.9838,-356.4992,-305.9992,-258.4048,-410.7736,-245.9092,32.9185,-237.9085,-403.8853,12.0239,-164.6252,107.369,8.0379,-139.3796,365.9266,-448.5863,314.1141,-280.0686,-463.4747,2.6092,-376.8811,96.7462,242.419,-480.9968,345.3697,328.281,39.0387,-342.3026,469.0461,-103.9411,381.0458,-141.6771,-4.7988,289.4799,-55.0671,-292.4788,364.1267,-395.9876,-232.5859,-285.7012,-444.7762,79.5454,251.5539,359.3705,467.2154,273.1778,-373.8216,299.611,-464.32,-106.0638,491.2626,-39.3721,-110.1154,383.4063,45.0848,262.2361,-111.754,249.0826,-305.9751,22.9663,-120.4794,484.0797,151.9063,388.5088,105.9067,444.0361,-45.5696,243.9313,303.4003,-27.795,-7.2151,411.6561,-100.6193,-207.3277,-6.4576,-300.3722,118.2638,342.3654,66.7861,104.0615,180.5752,281.6788,-342.7549,-65.8778,140.9091,-169.8935,-437.2435,-392.4147,-348.2217,202.3684,440.4071,-276.2247,129.5096,-43.4059,-456.876,-445.1126,-193.8847,-156.3408,274.7116,-129.6168,-484.7027,214.0806,375.6649,444.5303,-71.8577,-474.5957,-342.2716,-322.7281,205.6087,-14.3469,-283.0586,-86.2198,-420.3924,182.3599,22.7485,452.8141,-286.5839,155.1115,-316.4854,-28.3824,56.4873,-146.001,378.2396,473.2566,380.2417,-399.6208,-347.9016,206.5985,-145.9688,-219.9708,-216.6865,404.4334,324.8516,55.3154,-119.4645,-79.2847,-191.5158,-136.3728,413.3355,356.7344,-437.7335,404.9099,-494.6143,135.9107,151.2158,-161.0672,451.0975,-93.0876,495.7659,321.2577,-451.6211,-311.9214,-432.4626,496.8637,382.6126,97.7431,245.2208,-462.5156,-274.939,116.6882,80.6219,315.5602,-342.4345,274.387,-418.7591,53.5711,-96.2339,271.8546,-46.8098,150.3864,206.6682,311.9593,174.7625,-198.5948,105.6143,212.7571,237.4211,-21.2842,-383.0439,285.4973,-80.4955,105.5129,-158.8626,-156.2353,98.5192,-308.2654,-92.7883,45.686,-380.6921,140.1508,365.9526,108.1565,-140.4508,-246.5095,133.3693,-4.6582,-20.843,339.374,-99.2908,17.8824,242.8291,75.8953,-441.8762,-352.3943,-484.0549,-401.3674,321.6953,213.7102,261.1824,-41.5899,65.2736,-26.9977,152.9615,308.5357,-211.4979,477.2073,-414.7828,-330.2034,-123.7898,-261.1105,-328.6632,-15.1514,438.4531,-323.3771,-173.6672,-293.5578,459.1075,-18.34,-270.1311,-315.6445,348.4226,-435.2806,-419.9553,-106.1863,-283.0003,43.5508,-18.0891,224.808,406.4155,-163.6988,-129.2904,207.8322,474.5666,-60.1079,9.563,44.705,118.7999,-301.6795,-38.2161,410.4003,-190.4926,-430.6086,1.2693,312.7535,-455.5725,-271.7346,-159.4378,-227.9918,312.9331,166.2825,-31.7905,-227.9038,-421.644,296.5264,-335.4129,413.344,48.8782,217.3682,434.8719,-387.0484,170.5191,201.0157,127.1522,474.5561,-100.6847,-434.2549,29.5853,-467.6037,184.2936,116.9028,124.6507,-497.3002,-86.4991,59.6243,-104.9888,-294.6228,223.8354,-97.9298,64.2283,203.7397,186.3586,64.5045,122.1795,439.3753,464.9225,434.9882,85.5836,259.4985,70.5414,-117.1196,198.2037,-127.745,-200.2022,-386.0653,1.6688,272.3237,211.4442,445.0575,479.2069,-354.0842,-211.1788,160.3409,258.6131,-71.1154,-196.203,-95.1323,-398.3867,70.6868,15.5394,333.5079,187.8193
,-393.7479,269.1152,-336.0885,339.4546,-147.6351,186.847,-126.4872,-108.1731,-70.3962,-389.0454,135.3408,-51.5671,4.6139,-3.1587,-274.941,-208.586,171.0845,-277.1015,-104.1653,-260.934,-310.5456,290.0738,-38.1867,-254.3353,31.6405,433.6526,86.9343,48.5563,137.4622,-34.6388,-1.5028,-452.3147,349.1007,-347.9019,70.4255,-201.5194,-430.2517,177.8199,-391.6226,20.1876,-287.8148,-190.1158,-356.0897,-319.7011,87.2696,-141.1962,-137.9268,-70.4841,95.4435,16.2261,191.5316,-214.8942,142.0224,209.0575,180.5105,26.1511,-497.0902,-186.2708,441.5505,-7.6379,23.9577,-401.2169,-339.3474,16.9572,269.8157,178.6692,299.5455,-367.3993,-413.7073,-96.9188,-472.0939,-327.975,129.6294,446.5669,-32.714,-120.6079,71.7334,190.4871,436.6714,110.0289,-108.4299,8.0033,-341.055,77.7304,-196.1335,-343.1391,-152.6897,-378.0097,-106.9584,395.4607,-98.6717,-131.0531,-140.8907,-185.3101,-68.8474,-478.2088,-18.3317,256.0313,-119.4212,334.7436,318.1335,-20.8287,-147.7622,118.1926,-218.2094,-478.7367,217.0914,219.1878,75.2151,231.5097,-410.8572,-46.2061,153.4654,264.0178,144.8928,-115.1857,-369.8591,126.6643,-122.1998,480.7727,-85.4362,134.3245,-34.403,124.6945,12.1795,-184.8116,390.6826,87.9712,367.0822,-233.2724,-245.9838,104.6339,-53.7753,-264.3381,50.9031,-122.0604,136.6276,465.3429,288.8934,5.7445,-325.7759,53.493,-441.8264,-271.3847,-371.3886,-272.7637,-102.4757,-358.4499,-143.2793,-64.6363,499.8284,-155.8017,-37.8801,63.5318,-377.6101,125.3457,57.231,49.3608,-245.5766,-47.9802,383.4127,-114.1047,-30.258,-479.6988,-194.4846,368.4079,466.1545,-26.7084,8.2433,74.9479,-155.4871,494.9634,-196.3082,-206.8022,423.2288,-494.5835,-291.7666,-204.8478,396.6,-418.9048,-130.0584,-137.5258,-440.7922,73.1423,-251.5694,356.1615,-34.088,-23.3318,43.2522,-297.3896,409.686,-305.5675,424.8321,-154.9096,181.7696,-87.5939,-151.7475,-319.3074,227.2369,-113.0086,-68.1299,368.0398,-20.3706,-296.0095,-269.9336,-250.5127,-56.5895,188.9818,82.7481,488.6398,-151.2088,11.8563,320.4209,316.3155,317.2716,-185.4569,128.2219,108.4381,-453.2648,-406.1359,-414.2863,36.6919,-160.1338,188.7767,364.4688,-13.3882,233.621,11.2764,-154.8894,424.1841,-128.4954,23.1408,183.1928,382.2918,-464.2506,234.1366,-447.21,-425.1161,66.1712,424.058,299.3596,372.7703,-162.3764,-37.8575,-468.5142,189.9036,172.0345,310.1368,-459.7659,-219.5317,-68.9306,211.4315,-408.8232,215.1716,-134.0617,367.326,385.2393,453.6431,-258.6041,194.9712,-266.8576,145.4018,-406.4884,119.3747,466.6835,-404.694,-480.8574,-3.1007,-48.0469,-70.915,-229.4956,-69.6999,-114.9404,372.8744,-247.5689,250.4333,252.9375,71.5672,323.3984,268.7582,16.7518,-258.5373,252.518,378.1721,-197.3271,-211.1179,444.2923,-152.2646,262.3183,159.3338 + ] + - do: + indices.get_mapping: + index: test-too-big-still-float + + - match: { test-too-big-still-float.mappings.properties.my_field.type: float } + + +--- +"Fields with float arrays within the threshold map as dense_vector": + + - do: + index: + index: test-map-dense-vector + id: "1" + refresh: true + body: + my_field: [ + 159.1, 289.56, -128.7424, 145.9871, -164.0003, 86.4034, -89.6929, 257.9717, 131.6075, 67.9233, -144.8255, 223.8446, 77.3228, -210.1163, -139.4783, 12.6499, 15.4491, 108.3465, -189.3947, 178.2045, -187.5925, 184.5089, 77.3022, -202.7439, -13.4959, 115.9719, -139.4332, 196.7845, 104.7573, -156.7746, 166.9878, 68.3936, 159.8473, -141.4446, 21.1947, 186.5908, -209.6895, 68.6169, 44.1255, 147.4659, 56.5079, -179.7997, -85.1651, 11.4847, 124.1662, 96.2246, -178.6705, 85.5925, 205.3616, -16.4704, 172.4947, -115.2535, -58.1722, 94.4836, 34.6458, -70.1011, 
-58.8047, 149.9562, -37.8998, 196.9805, -169.3555, -163.9432, 188.5611, 214.8378, 29.3182, -24.8724, 152.9382, -109.4345, -123.6716, -8.2441, 64.5902, 27.8083, 40.8185, -94.3161, 58.1463, -138.7432, 24.6805, -88.7222, -11.2018, 206.6434, 201.9024, 87.3079, -3.2883, -60.2484, -109.5789, 105.5766, -116.6709, -17.7073, -71.5093, -75.2937, -176.8691, -146.4967, 53.7586, 199.5294, 55.9754, -48.7399, 82.2051, 135.2921, 22.4408, -116.4008, -33.7538, 29.7207, 6.3692, -97.5768, -12.7982, -200.9331, -62.2743, 81.0843, 136.2247, 150.2565, 139.6838, 155.2657, -25.7447, 198.5955, 18.8099, 46.9014, -60.2672, 136.4801, 171.8966, 172.5842, 13.9123, 75.8386, -64.2444, -48.1964, 135.9685, 7.4927, -40.6424, -76.8922 + ] + + + - do: + indices.get_mapping: + index: test-map-dense-vector + + - match: { test-map-dense-vector.mappings.properties.my_field.type: dense_vector } + - match: { test-map-dense-vector.mappings.properties.my_field.dims: 128 } + - match: { test-map-dense-vector.mappings.properties.my_field.index: true } + - match: { test-map-dense-vector.mappings.properties.my_field.similarity: cosine } + + - do: + search: + index: test-map-dense-vector + body: + fields: [ my_field ] + knn: + field: my_field + query_vector: [ + 159.1, 289.56, -128.7424, 145.9871, -164.0003, 86.4034, -89.6929, 257.9717, 131.6075, 67.9233, -144.8255, 223.8446, 77.3228, -210.1163, -139.4783, 12.6499, 15.4491, 108.3465, -189.3947, 178.2045, -187.5925, 184.5089, 77.3022, -202.7439, -13.4959, 115.9719, -139.4332, 196.7845, 104.7573, -156.7746, 166.9878, 68.3936, 159.8473, -141.4446, 21.1947, 186.5908, -209.6895, 68.6169, 44.1255, 147.4659, 56.5079, -179.7997, -85.1651, 11.4847, 124.1662, 96.2246, -178.6705, 85.5925, 205.3616, -16.4704, 172.4947, -115.2535, -58.1722, 94.4836, 34.6458, -70.1011, -58.8047, 149.9562, -37.8998, 196.9805, -169.3555, -163.9432, 188.5611, 214.8378, 29.3182, -24.8724, 152.9382, -109.4345, -123.6716, -8.2441, 64.5902, 27.8083, 40.8185, -94.3161, 58.1463, -138.7432, 24.6805, -88.7222, -11.2018, 206.6434, 201.9024, 87.3079, -3.2883, -60.2484, -109.5789, 105.5766, -116.6709, -17.7073, -71.5093, -75.2937, -176.8691, -146.4967, 53.7586, 199.5294, 55.9754, -48.7399, 82.2051, 135.2921, 22.4408, -116.4008, -33.7538, 29.7207, 6.3692, -97.5768, -12.7982, -200.9331, -62.2743, 81.0843, 136.2247, 150.2565, 139.6838, 155.2657, -25.7447, 198.5955, 18.8099, 46.9014, -60.2672, 136.4801, 171.8966, 172.5842, 13.9123, 75.8386, -64.2444, -48.1964, 135.9685, 7.4927, -40.6424, -76.8922 + ] + k: 10 + num_candidates: 128 + + - match: { hits.total.value: 1 } + - match: { hits.hits.0._id: "1"} + +--- +"Fields with explicit float mappings still map to float": + + - do: + indices.create: + index: my-float-index + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + my_field: + type: float + + - do: + index: + index: my-float-index + id: "1" + refresh: true + body: + my_field: [ + 159.1, 289.56, -128.7424, 145.9871, -164.0003, 86.4034, -89.6929, 257.9717, 131.6075, 67.9233, -144.8255, 223.8446, 77.3228, -210.1163, -139.4783, 12.6499, 15.4491, 108.3465, -189.3947, 178.2045, -187.5925, 184.5089, 77.3022, -202.7439, -13.4959, 115.9719, -139.4332, 196.7845, 104.7573, -156.7746, 166.9878, 68.3936, 159.8473, -141.4446, 21.1947, 186.5908, -209.6895, 68.6169, 44.1255, 147.4659, 56.5079, -179.7997, -85.1651, 11.4847, 124.1662, 96.2246, -178.6705, 85.5925, 205.3616, -16.4704, 172.4947, -115.2535, -58.1722, 94.4836, 34.6458, -70.1011, -58.8047, 149.9562, -37.8998, 196.9805, -169.3555, -163.9432, 
188.5611, 214.8378, 29.3182, -24.8724, 152.9382, -109.4345, -123.6716, -8.2441, 64.5902, 27.8083, 40.8185, -94.3161, 58.1463, -138.7432, 24.6805, -88.7222, -11.2018, 206.6434, 201.9024, 87.3079, -3.2883, -60.2484, -109.5789, 105.5766, -116.6709, -17.7073, -71.5093, -75.2937, -176.8691, -146.4967, 53.7586, 199.5294, 55.9754, -48.7399, 82.2051, 135.2921, 22.4408, -116.4008, -33.7538, 29.7207, 6.3692, -97.5768, -12.7982, -200.9331, -62.2743, 81.0843, 136.2247, 150.2565, 139.6838, 155.2657, -25.7447, 198.5955, 18.8099, 46.9014, -60.2672, 136.4801, 171.8966, 172.5842, 13.9123, 75.8386, -64.2444, -48.1964, 135.9685, 7.4927, -40.6424, -76.8922 + ] + + - do: + indices.get_mapping: + index: my-float-index + + - match: { my-float-index.mappings.properties.my_field.type: float } + +--- +"Dynamically created dense vector fields still require the number of dims to match": + + - do: + indices.create: + index: set-dense-vector + body: + settings: + index: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + my_field: + type: dense_vector + dims: 5 + index: true + similarity: cosine + + - do: + indices.get_mapping: + index: set-dense-vector + + - match: { set-dense-vector.mappings.properties.my_field.type: dense_vector } + - match: { set-dense-vector.mappings.properties.my_field.dims: 5 } + - match: { set-dense-vector.mappings.properties.my_field.index: true } + - match: { set-dense-vector.mappings.properties.my_field.similarity: cosine } + + - do: + catch: /more dimensions than defined in the mapping/ + index: + index: set-dense-vector + id: "1" + refresh: true + body: + my_field: [ 159.1, 289.56, -128.7424, 145.9871, -164.0003, 86.4034, -89.6929, 257.9717, 131.6075, 67.9233, -144.8255, 223.8446, 77.3228 ] + + - do: + index: + index: test-map-dense-vector + id: "1" + refresh: true + body: + my_field: [ + 159.1, 289.56, -128.7424, 145.9871, -164.0003, 86.4034, -89.6929, 257.9717, 131.6075, 67.9233, -144.8255, 223.8446, 77.3228, -210.1163, -139.4783, 12.6499, 15.4491, 108.3465, -189.3947, 178.2045, -187.5925, 184.5089, 77.3022, -202.7439, -13.4959, 115.9719, -139.4332, 196.7845, 104.7573, -156.7746, 166.9878, 68.3936, 159.8473, -141.4446, 21.1947, 186.5908, -209.6895, 68.6169, 44.1255, 147.4659, 56.5079, -179.7997, -85.1651, 11.4847, 124.1662, 96.2246, -178.6705, 85.5925, 205.3616, -16.4704, 172.4947, -115.2535, -58.1722, 94.4836, 34.6458, -70.1011, -58.8047, 149.9562, -37.8998, 196.9805, -169.3555, -163.9432, 188.5611, 214.8378, 29.3182, -24.8724, 152.9382, -109.4345, -123.6716, -8.2441, 64.5902, 27.8083, 40.8185, -94.3161, 58.1463, -138.7432, 24.6805, -88.7222, -11.2018, 206.6434, 201.9024, 87.3079, -3.2883, -60.2484, -109.5789, 105.5766, -116.6709, -17.7073, -71.5093, -75.2937, -176.8691, -146.4967, 53.7586, 199.5294, 55.9754, -48.7399, 82.2051, 135.2921, 22.4408, -116.4008, -33.7538, 29.7207, 6.3692, -97.5768, -12.7982, -200.9331, -62.2743, 81.0843, 136.2247, 150.2565, 139.6838, 155.2657, -25.7447, 198.5955, 18.8099, 46.9014, -60.2672, 136.4801, 171.8966, 172.5842, 13.9123, 75.8386, -64.2444, -48.1964, 135.9685, 7.4927, -40.6424, -76.8922 + ] + + - do: + indices.get_mapping: + index: test-map-dense-vector + + - match: { test-map-dense-vector.mappings.properties.my_field.type: dense_vector } + - match: { test-map-dense-vector.mappings.properties.my_field.dims: 128 } + - match: { test-map-dense-vector.mappings.properties.my_field.index: true } + - match: { test-map-dense-vector.mappings.properties.my_field.similarity: cosine } + + - do: + catch: /different number of dimensions/ + 
index: + index: test-map-dense-vector + id: "1" + refresh: true + body: + my_field: [159.1, 289.56, -128.7424, 145.9871, -164.0003, 86.4034, -89.6929, 257.9717, 131.6075, 67.9233, -144.8255, 223.8446, 77.3228] + +--- +"If we set dynamic templates to astronomically high or low dim counts, we get a mapping error": + + - do: + catch: bad_request + indices.create: + index: bad-dynamic-template-index + body: + mappings: + dynamic_templates: + - bad_dynamic_template: + match: "*dense_vector*" + mapping: + type: dense_vector + dims: 3000 + index: true + similarity: cosine + + - do: + catch: bad_request + indices.create: + index: bad-dynamic-template-index + body: + mappings: + dynamic_templates: + - bad_dynamic_template: + match: "*dense_vector*" + mapping: + type: dense_vector + dims: 0 + index: true + similarity: cosine + +--- +"We respect values set in dynamic_templates for floats and dense vectors": + + - do: + indices.create: + index: my-dynamic-template-index + body: + mappings: + dynamic_templates: + - my_vector_template: + match: "*dense_vector*" + mapping: + type: dense_vector + dims: 5 + index: true + similarity: cosine + - my_float_template: + match: "*float*" + mapping: + type: float + + - do: + index: + index: my-dynamic-template-index + id: "1" + refresh: true + body: + my_dense_vector_field: [ 159.1, 289.56, -128.7424, 145.9871, -164.0003 ] + + - do: + catch: /different number of dimensions/ + index: + index: my-dynamic-template-index + id: "2" + refresh: true + body: + my_mismatched_dense_vector_field: [ 159.1, 289.56, -128.7424 ] + + - do: + index: + index: my-dynamic-template-index + id: "3" + refresh: true + body: + my_small_float_field: [ 159.1, 289.56, -128.7424, 145.9871, -164.0003 ] + + - do: + index: + index: my-dynamic-template-index + id: "3" + refresh: true + body: + my_float_field: [ + 159.1, 289.56, -128.7424, 145.9871, -164.0003, 86.4034, -89.6929, 257.9717, 131.6075, 67.9233, -144.8255, 223.8446, 77.3228, -210.1163, -139.4783, 12.6499, 15.4491, 108.3465, -189.3947, 178.2045, -187.5925, 184.5089, 77.3022, -202.7439, -13.4959, 115.9719, -139.4332, 196.7845, 104.7573, -156.7746, 166.9878, 68.3936, 159.8473, -141.4446, 21.1947, 186.5908, -209.6895, 68.6169, 44.1255, 147.4659, 56.5079, -179.7997, -85.1651, 11.4847, 124.1662, 96.2246, -178.6705, 85.5925, 205.3616, -16.4704, 172.4947, -115.2535, -58.1722, 94.4836, 34.6458, -70.1011, -58.8047, 149.9562, -37.8998, 196.9805, -169.3555, -163.9432, 188.5611, 214.8378, 29.3182, -24.8724, 152.9382, -109.4345, -123.6716, -8.2441, 64.5902, 27.8083, 40.8185, -94.3161, 58.1463, -138.7432, 24.6805, -88.7222, -11.2018, 206.6434, 201.9024, 87.3079, -3.2883, -60.2484, -109.5789, 105.5766, -116.6709, -17.7073, -71.5093, -75.2937, -176.8691, -146.4967, 53.7586, 199.5294, 55.9754, -48.7399, 82.2051, 135.2921, 22.4408, -116.4008, -33.7538, 29.7207, 6.3692, -97.5768, -12.7982, -200.9331, -62.2743, 81.0843, 136.2247, 150.2565, 139.6838, 155.2657, -25.7447, 198.5955, 18.8099, 46.9014, -60.2672, 136.4801, 171.8966, 172.5842, 13.9123, 75.8386, -64.2444, -48.1964, 135.9685, 7.4927, -40.6424, -76.8922 + ] + + - do: + index: + index: my-dynamic-template-index + id: "4" + refresh: true + body: + my_dynamic_field: [ + 159.1, 289.56, -128.7424, 145.9871, -164.0003, 86.4034, -89.6929, 257.9717, 131.6075, 67.9233, -144.8255, 223.8446, 77.3228, -210.1163, -139.4783, 12.6499, 15.4491, 108.3465, -189.3947, 178.2045, -187.5925, 184.5089, 77.3022, -202.7439, -13.4959, 115.9719, -139.4332, 196.7845, 104.7573, -156.7746, 166.9878, 68.3936, 159.8473, -141.4446, 
21.1947, 186.5908, -209.6895, 68.6169, 44.1255, 147.4659, 56.5079, -179.7997, -85.1651, 11.4847, 124.1662, 96.2246, -178.6705, 85.5925, 205.3616, -16.4704, 172.4947, -115.2535, -58.1722, 94.4836, 34.6458, -70.1011, -58.8047, 149.9562, -37.8998, 196.9805, -169.3555, -163.9432, 188.5611, 214.8378, 29.3182, -24.8724, 152.9382, -109.4345, -123.6716, -8.2441, 64.5902, 27.8083, 40.8185, -94.3161, 58.1463, -138.7432, 24.6805, -88.7222, -11.2018, 206.6434, 201.9024, 87.3079, -3.2883, -60.2484, -109.5789, 105.5766, -116.6709, -17.7073, -71.5093, -75.2937, -176.8691, -146.4967, 53.7586, 199.5294, 55.9754, -48.7399, 82.2051, 135.2921, 22.4408, -116.4008, -33.7538, 29.7207, 6.3692, -97.5768, -12.7982, -200.9331, -62.2743, 81.0843, 136.2247, 150.2565, 139.6838, 155.2657, -25.7447, 198.5955, 18.8099, 46.9014, -60.2672, 136.4801, 171.8966, 172.5842, 13.9123, 75.8386, -64.2444, -48.1964, 135.9685, 7.4927, -40.6424, -76.8922 + ] + + - do: + indices.get_mapping: + index: my-dynamic-template-index + + - match: { my-dynamic-template-index.mappings.properties.my_dense_vector_field.type: dense_vector } + - match: { my-dynamic-template-index.mappings.properties.my_dense_vector_field.dims: 5 } + - match: { my-dynamic-template-index.mappings.properties.my_dense_vector_field.index: true } + - match: { my-dynamic-template-index.mappings.properties.my_dense_vector_field.similarity: cosine } + - match: { my-dynamic-template-index.mappings.properties.my_small_float_field.type: float } + - match: { my-dynamic-template-index.mappings.properties.my_float_field.type: float } + - match: { my-dynamic-template-index.mappings.properties.my_dynamic_field.type: dense_vector } + - match: { my-dynamic-template-index.mappings.properties.my_dynamic_field.dims: 128 } + - match: { my-dynamic-template-index.mappings.properties.my_dynamic_field.index: true } + - match: { my-dynamic-template-index.mappings.properties.my_dynamic_field.similarity: cosine } + +--- +"Fields mapped as dense_vector without dims dynamically map the dim size to the first indexed document": + + - do: + indices.create: + index: test-mapped-index + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + my_dense_vector_field: + type: dense_vector + + - do: + indices.get_mapping: + index: test-mapped-index + + - match: { test-mapped-index.mappings.properties.my_dense_vector_field.type: dense_vector } + - match: { test-mapped-index.mappings.properties.my_dense_vector_field.index: true } + - match: { test-mapped-index.mappings.properties.my_dense_vector_field.similarity: cosine } + - match: { test-mapped-index.mappings.properties.my_dense_vector_field.dims: null } + + - do: + index: + index: test-mapped-index + id: "1" + refresh: true + body: + my_dense_vector_field: [ 159.1, 289.56, -128.7424 ] + + - do: + indices.get_mapping: + index: test-mapped-index + + - match: { test-mapped-index.mappings.properties.my_dense_vector_field.type: dense_vector } + - match: { test-mapped-index.mappings.properties.my_dense_vector_field.index: true } + - match: { test-mapped-index.mappings.properties.my_dense_vector_field.similarity: cosine } + - match: { test-mapped-index.mappings.properties.my_dense_vector_field.dims: 3 } + +--- +"Sub-objects mapped as dense_vector without dims dynamically map the dim size to the first indexed document": + + - do: + indices.create: + index: test-mapped-index + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + my_parent_object: + type: object + properties: + 
my_child_dense_vector_field: + type: dense_vector + + - do: + indices.get_mapping: + index: test-mapped-index + + - match: { test-mapped-index.mappings.properties.my_parent_object.properties.my_child_dense_vector_field.type: dense_vector } + - match: { test-mapped-index.mappings.properties.my_parent_object.properties.my_child_dense_vector_field.index: true } + - match: { test-mapped-index.mappings.properties.my_parent_object.properties.my_child_dense_vector_field.similarity: cosine } + - is_false: test-mapped-index.mappings.properties.my_parent_object.properties.my_child_dense_vector_field.dims + + - do: + index: + index: test-mapped-index + id: "1" + refresh: true + body: + my_parent_object: + my_child_dense_vector_field: [ 159.1, 289.56, -128.7424 ] + + - do: + indices.get_mapping: + index: test-mapped-index + + - match: { test-mapped-index.mappings.properties.my_parent_object.properties.my_child_dense_vector_field.type: dense_vector } + - match: { test-mapped-index.mappings.properties.my_parent_object.properties.my_child_dense_vector_field.index: true } + - match: { test-mapped-index.mappings.properties.my_parent_object.properties.my_child_dense_vector_field.similarity: cosine } + - match: { test-mapped-index.mappings.properties.my_parent_object.properties.my_child_dense_vector_field.dims: 3 } + + +--- +"Fields with sub-object float arrays within the threshold map as dense_vector": + + - do: + index: + index: test-map-dense-vector-in-object + id: "1" + refresh: true + body: + parent_field: + my_field: [ + 159.1, 289.56, -128.7424, 145.9871, -164.0003, 86.4034, -89.6929, 257.9717, 131.6075, 67.9233, -144.8255, 223.8446, 77.3228, -210.1163, -139.4783, 12.6499, 15.4491, 108.3465, -189.3947, 178.2045, -187.5925, 184.5089, 77.3022, -202.7439, -13.4959, 115.9719, -139.4332, 196.7845, 104.7573, -156.7746, 166.9878, 68.3936, 159.8473, -141.4446, 21.1947, 186.5908, -209.6895, 68.6169, 44.1255, 147.4659, 56.5079, -179.7997, -85.1651, 11.4847, 124.1662, 96.2246, -178.6705, 85.5925, 205.3616, -16.4704, 172.4947, -115.2535, -58.1722, 94.4836, 34.6458, -70.1011, -58.8047, 149.9562, -37.8998, 196.9805, -169.3555, -163.9432, 188.5611, 214.8378, 29.3182, -24.8724, 152.9382, -109.4345, -123.6716, -8.2441, 64.5902, 27.8083, 40.8185, -94.3161, 58.1463, -138.7432, 24.6805, -88.7222, -11.2018, 206.6434, 201.9024, 87.3079, -3.2883, -60.2484, -109.5789, 105.5766, -116.6709, -17.7073, -71.5093, -75.2937, -176.8691, -146.4967, 53.7586, 199.5294, 55.9754, -48.7399, 82.2051, 135.2921, 22.4408, -116.4008, -33.7538, 29.7207, 6.3692, -97.5768, -12.7982, -200.9331, -62.2743, 81.0843, 136.2247, 150.2565, 139.6838, 155.2657, -25.7447, 198.5955, 18.8099, 46.9014, -60.2672, 136.4801, 171.8966, 172.5842, 13.9123, 75.8386, -64.2444, -48.1964, 135.9685, 7.4927, -40.6424, -76.8922, 1.234, 5.34 + ] + + + - do: + indices.get_mapping: + index: test-map-dense-vector-in-object + + - match: { test-map-dense-vector-in-object.mappings.properties.parent_field.properties.my_field.type: dense_vector } + - match: { test-map-dense-vector-in-object.mappings.properties.parent_field.properties.my_field.dims: 130 } + - match: { test-map-dense-vector-in-object.mappings.properties.parent_field.properties.my_field.index: true } + - match: { test-map-dense-vector-in-object.mappings.properties.parent_field.properties.my_field.similarity: cosine } + +--- +"Fields with sub-objects mapped as dense_vector without specified dims get the dim size mapped to the size of the first indexed array": + + - do: + index: + index: test-map-dense-vector-in-object 
+ id: "1" + refresh: true + body: + parent_field: + my_field: [ + 159.1, 289.56, -128.7424, 145.9871, -164.0003, 86.4034, -89.6929, 257.9717, 131.6075, 67.9233, -144.8255, 223.8446, 77.3228, -210.1163, -139.4783, 12.6499, 15.4491, 108.3465, -189.3947, 178.2045, -187.5925, 184.5089, 77.3022, -202.7439, -13.4959, 115.9719, -139.4332, 196.7845, 104.7573, -156.7746, 166.9878, 68.3936, 159.8473, -141.4446, 21.1947, 186.5908, -209.6895, 68.6169, 44.1255, 147.4659, 56.5079, -179.7997, -85.1651, 11.4847, 124.1662, 96.2246, -178.6705, 85.5925, 205.3616, -16.4704, 172.4947, -115.2535, -58.1722, 94.4836, 34.6458, -70.1011, -58.8047, 149.9562, -37.8998, 196.9805, -169.3555, -163.9432, 188.5611, 214.8378, 29.3182, -24.8724, 152.9382, -109.4345, -123.6716, -8.2441, 64.5902, 27.8083, 40.8185, -94.3161, 58.1463, -138.7432, 24.6805, -88.7222, -11.2018, 206.6434, 201.9024, 87.3079, -3.2883, -60.2484, -109.5789, 105.5766, -116.6709, -17.7073, -71.5093, -75.2937, -176.8691, -146.4967, 53.7586, 199.5294, 55.9754, -48.7399, 82.2051, 135.2921, 22.4408, -116.4008, -33.7538, 29.7207, 6.3692, -97.5768, -12.7982, -200.9331, -62.2743, 81.0843, 136.2247, 150.2565, 139.6838, 155.2657, -25.7447, 198.5955, 18.8099, 46.9014, -60.2672, 136.4801, 171.8966, 172.5842, 13.9123, 75.8386, -64.2444, -48.1964, 135.9685, 7.4927, -40.6424, -76.8922, 1.23, 4.34, 2.12, -35.3 + ] + + + - do: + indices.get_mapping: + index: test-map-dense-vector-in-object + + - match: { test-map-dense-vector-in-object.mappings.properties.parent_field.properties.my_field.type: dense_vector } + - match: { test-map-dense-vector-in-object.mappings.properties.parent_field.properties.my_field.dims: 132 } + - match: { test-map-dense-vector-in-object.mappings.properties.parent_field.properties.my_field.index: true } + - match: { test-map-dense-vector-in-object.mappings.properties.parent_field.properties.my_field.similarity: cosine } + + +--- +"Fields with nested float arrays within the threshold map throw an exception": + + - do: + indices.create: + index: test-nested-index + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + my_nested_field: + type: + nested + + - do: + catch: /\[dense_vector\] fields cannot be indexed if they\'re within \[nested\] mappings/ + index: + index: test-nested-index + id: "1" + refresh: true + body: + my_nested_field: + my_field: [ + 159.1, 289.56, -128.7424, 145.9871, -164.0003, 86.4034, -89.6929, 257.9717, 131.6075, 67.9233, -144.8255, 223.8446, 77.3228, -210.1163, -139.4783, 12.6499, 15.4491, 108.3465, -189.3947, 178.2045, -187.5925, 184.5089, 77.3022, -202.7439, -13.4959, 115.9719, -139.4332, 196.7845, 104.7573, -156.7746, 166.9878, 68.3936, 159.8473, -141.4446, 21.1947, 186.5908, -209.6895, 68.6169, 44.1255, 147.4659, 56.5079, -179.7997, -85.1651, 11.4847, 124.1662, 96.2246, -178.6705, 85.5925, 205.3616, -16.4704, 172.4947, -115.2535, -58.1722, 94.4836, 34.6458, -70.1011, -58.8047, 149.9562, -37.8998, 196.9805, -169.3555, -163.9432, 188.5611, 214.8378, 29.3182, -24.8724, 152.9382, -109.4345, -123.6716, -8.2441, 64.5902, 27.8083, 40.8185, -94.3161, 58.1463, -138.7432, 24.6805, -88.7222, -11.2018, 206.6434, 201.9024, 87.3079, -3.2883, -60.2484, -109.5789, 105.5766, -116.6709, -17.7073, -71.5093, -75.2937, -176.8691, -146.4967, 53.7586, 199.5294, 55.9754, -48.7399, 82.2051, 135.2921, 22.4408, -116.4008, -33.7538, 29.7207, 6.3692, -97.5768, -12.7982, -200.9331, -62.2743, 81.0843, 136.2247, 150.2565, 139.6838, 155.2657, -25.7447, 198.5955, 18.8099, 46.9014, -60.2672, 136.4801, 171.8966, 
172.5842, 13.9123, 75.8386, -64.2444, -48.1964, 135.9685, 7.4927, -40.6424, -76.8922 + ] + +--- +"Fields with copyTo fields still map as float": + + - do: + indices.create: + index: test-copyto-index + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + mappings: + properties: + my_float1: + type: + float + copy_to: my_copyto_field + my_float2: + type: + float + copy_to: my_copyto_field + + - do: + index: + index: test-copyto-index + id: "1" + refresh: true + body: + my_float1: [ + 159.1, 289.56, -128.7424, 145.9871, -164.0003, 86.4034, -89.6929, 257.9717, 131.6075, 67.9233, -144.8255, 223.8446, 77.3228, -210.1163, -139.4783, 12.6499, 15.4491, 108.3465, -189.3947, 178.2045, -187.5925, 184.5089, 77.3022, -202.7439, -13.4959, 115.9719, -139.4332, 196.7845, 104.7573, -156.7746, 166.9878, 68.3936, 159.8473, -141.4446, 21.1947, 186.5908, -209.6895, 68.6169, 44.1255, 147.4659, 56.5079, -179.7997, -85.1651, 11.4847, 124.1662, 96.2246, -178.6705, 85.5925, 205.3616, -16.4704, 172.4947, -115.2535, -58.1722, 94.4836, 34.6458, -70.1011, -58.8047, 149.9562, -37.8998, 196.9805, -169.3555, -163.9432, 188.5611, 214.8378, 29.3182, -24.8724, 152.9382, -109.4345, -123.6716, -8.2441, 64.5902, 27.8083, 40.8185, -94.3161, 58.1463, -138.7432, 24.6805, -88.7222, -11.2018, 206.6434, 201.9024, 87.3079, -3.2883, -60.2484, -109.5789, 105.5766, -116.6709, -17.7073, -71.5093, -75.2937, -176.8691, -146.4967, 53.7586, 199.5294, 55.9754, -48.7399, 82.2051, 135.2921, 22.4408, -116.4008, -33.7538, 29.7207, 6.3692, -97.5768, -12.7982, -200.9331, -62.2743, 81.0843, 136.2247, 150.2565, 139.6838, 155.2657, -25.7447, 198.5955, 18.8099, 46.9014, -60.2672, 136.4801, 171.8966, 172.5842, 13.9123, 75.8386, -64.2444, -48.1964, 135.9685, 7.4927, -40.6424, -76.8922 + ] + + - do: + indices.get_mapping: + index: test-copyto-index + + - match: { test-copyto-index.mappings.properties.my_float1.type: float } + - match: { test-copyto-index.mappings.properties.my_float2.type: float } + - match: { test-copyto-index.mappings.properties.my_copyto_field.type: float } + diff --git a/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml new file mode 100644 index 0000000000000..2ddb95d6fc139 --- /dev/null +++ b/rest-api-spec/src/yamlRestTest/resources/rest-api-spec/test/search.vectors/90_sparse_vector.yml @@ -0,0 +1,137 @@ +--- +"Indexing and searching sparse vectors": + + - skip: + version: " - 8.10.99" + reason: "sparse_vector field type reintroduced in 8.11" + + - do: + indices.create: + index: test + body: + mappings: + properties: + text: + type: text + ml.tokens: + type: sparse_vector + + - match: { acknowledged: true } + + - do: + index: + index: test + id: "1" + body: + text: "running is good for you" + ml: + tokens: + running: 2.4097164 + good: 2.170997 + run: 2.052153 + race: 1.4575411 + for: 1.1908325 + runner: 1.1803857 + exercise: 1.1652642 + you: 0.9654308 + training: 0.94999343 + sports: 0.93650943 + fitness: 0.83129317 + best: 0.820365 + bad: 0.7385934 + health: 0.7098149 + marathon: 0.61555296 + gym: 0.5652374 + + - match: { result: "created" } + + - do: + index: + index: test + id: "2" + body: + text: "walking is a healthy exercise" + ml: + tokens: + walking: 2.4797723 + exercise: 2.074234 + healthy: 1.971596 + walk: 1.6458614 + health: 1.5291847 + walker: 1.4736869 + activity: 1.0793462 + good: 1.0597849 + fitness: 0.91855437 + training: 0.86342937 + movement: 0.7657065 + normal: 
0.6694081 + foot: 0.5892523 + physical: 0.4926789 + + - match: { result: "created" } + + - do: + indices.refresh: { } + + - do: + search: + index: test + body: + query: + bool: + should: + - term: + ml.tokens: + value: "walk" + boost: 1.9790847 + - term: + ml.tokens: + value: "walking" + boost: 1.7092685 + - term: + ml.tokens: + value: "exercise" + boost: 0.84076905 + + - match: { hits.total.value: 2 } + - match: { hits.hits.0._id: "2" } + - match: { hits.hits.1._id: "1" } + +--- +"Sparse vector in 7.x": + - skip: + features: allowed_warnings + version: "8.0.0 - " + reason: "sparse_vector field type supported in 7.x" + - do: + allowed_warnings: + - "The [sparse_vector] field type is deprecated and will be removed in 8.0." + - "[sparse_vector] field type in old 7.x indices is allowed to contain [sparse_vector] fields, but they cannot be indexed or searched." + indices.create: + index: test + body: + mappings: + properties: + text: + type: text + ml.tokens: + type: sparse_vector + + - match: { acknowledged: true } + +--- +"Sparse vector in 8.x": + - skip: + version: " - 7.99.99, 8.11.0 - " + reason: "sparse_vector field type not supported in 8.x until 8.11.0" + - do: + catch: /The \[sparse_vector\] field type .* supported/ + indices.create: + index: test + body: + mappings: + properties: + text: + type: text + ml.tokens: + type: sparse_vector diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java index 3070641cd964c..600a4b9fcae9b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/IndicesRequestIT.java @@ -98,6 +98,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.Executor; import java.util.function.Function; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; @@ -754,7 +755,7 @@ public List getTransportInterceptors( @Override public TransportRequestHandler interceptHandler( String action, - String executor, + Executor executor, boolean forceExecution, TransportRequestHandler actualHandler ) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java index 1382e0c30adae..2645243916d76 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/CancellableTasksIT.java @@ -434,7 +434,7 @@ static class TestRequest extends ActionRequest { super(in); this.id = in.readInt(); this.node = new DiscoveryNode(in); - this.subRequests = in.readList(TestRequest::new); + this.subRequests = in.readCollectionAsList(TestRequest::new); this.timeout = in.readBoolean(); } @@ -457,7 +457,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeInt(id); node.writeTo(out); - out.writeList(subRequests); + out.writeCollection(subRequests); out.writeBoolean(timeout); } @@ -538,7 +538,7 @@ protected void doRun() throws Exception { } listener.onResponse(new TestResponse()); } - }, delay, ThreadPool.Names.GENERIC); + }, delay, transportService.getThreadPool().generic()); } @Override diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java index 606e023b6493a..390a3a8bdac4c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/CCSPointInTimeIT.java @@ -9,24 +9,57 @@ package org.elasticsearch.action.search; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.builder.PointInTimeBuilder; +import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.ccs.CrossClusterSearchIT; +import org.elasticsearch.search.query.ThrowingQueryBuilder; import org.elasticsearch.test.AbstractMultiClustersTestCase; +import org.elasticsearch.transport.RemoteClusterAware; import java.util.ArrayList; import java.util.Collection; import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; public class CCSPointInTimeIT extends AbstractMultiClustersTestCase { + public static final String REMOTE_CLUSTER = "remote_cluster"; + @Override protected Collection remoteClusterAlias() { - return List.of("remote_cluster"); + return List.of(REMOTE_CLUSTER); + } + + @Override + protected Collection> nodePlugins(String clusterAlias) { + return CollectionUtils.appendToCopy(super.nodePlugins(clusterAlias), CrossClusterSearchIT.TestQueryBuilderPlugin.class); + } + + public static class TestQueryBuilderPlugin extends Plugin implements SearchPlugin { + public TestQueryBuilderPlugin() {} + + @Override + public List> getQueries() { + QuerySpec throwingSpec = new QuerySpec<>(ThrowingQueryBuilder.NAME, ThrowingQueryBuilder::new, p -> { + throw new IllegalStateException("not implemented"); + }); + + return List.of(throwingSpec); + } } void indexDocs(Client client, String index, int numDocs) { @@ -39,7 +72,7 @@ void indexDocs(Client client, String index, int numDocs) { public void testBasic() { final Client localClient = client(LOCAL_CLUSTER); - final Client remoteClient = client("remote_cluster"); + final Client remoteClient = client(REMOTE_CLUSTER); int localNumDocs = randomIntBetween(10, 50); assertAcked(localClient.admin().indices().prepareCreate("local_test")); indexDocs(localClient, "local_test", localNumDocs); @@ -71,11 +104,102 @@ public void testBasic() { .get(); assertNoFailures(resp); assertHitCount(resp, (includeLocalIndex ? localNumDocs : 0) + remoteNumDocs); + + SearchResponse.Clusters clusters = resp.getClusters(); + int expectedNumClusters = 1 + (includeLocalIndex ? 
1 : 0);
+        assertThat(clusters.getTotal(), equalTo(expectedNumClusters));
+        assertThat(clusters.getSuccessful(), equalTo(expectedNumClusters));
+        assertThat(clusters.getSkipped(), equalTo(0));
+
+        if (includeLocalIndex) {
+            AtomicReference<SearchResponse.Cluster> localClusterRef = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY);
+            assertNotNull(localClusterRef);
+            assertOneSuccessfulShard(localClusterRef.get());
+        }
+
+        AtomicReference<SearchResponse.Cluster> remoteClusterRef = clusters.getCluster(REMOTE_CLUSTER);
+        assertNotNull(remoteClusterRef);
+        assertOneSuccessfulShard(remoteClusterRef.get());
+
         } finally {
             closePointInTime(pitId);
         }
     }
 
+    public void testFailuresOnOneShardWithPointInTime() throws ExecutionException, InterruptedException {
+        final Client localClient = client(LOCAL_CLUSTER);
+        final Client remoteClient = client(REMOTE_CLUSTER);
+        int localNumDocs = randomIntBetween(10, 50);
+        int numShards = randomIntBetween(2, 4);
+        Settings clusterSettings = indexSettings(numShards, randomIntBetween(0, 1)).build();
+        assertAcked(localClient.admin().indices().prepareCreate("local_test").setSettings(clusterSettings));
+        indexDocs(localClient, "local_test", localNumDocs);
+
+        int remoteNumDocs = randomIntBetween(10, 50);
+        assertAcked(remoteClient.admin().indices().prepareCreate("remote_test").setSettings(clusterSettings));
+        indexDocs(remoteClient, "remote_test", remoteNumDocs);
+        boolean includeLocalIndex = randomBoolean();
+        List<String> indices = new ArrayList<>();
+        if (includeLocalIndex) {
+            indices.add(randomFrom("*", "local_*", "local_test"));
+        }
+        indices.add(randomFrom("*:*", "remote_cluster:*", "remote_cluster:remote_test"));
+        String pitId = openPointInTime(indices.toArray(new String[0]), TimeValue.timeValueMinutes(2));
+        try {
+            if (randomBoolean()) {
+                localClient.prepareIndex("local_test").setId("local_new").setSource().get();
+                localClient.admin().indices().prepareRefresh().get();
+            }
+            if (randomBoolean()) {
+                remoteClient.prepareIndex("remote_test").setId("remote_new").setSource().get();
+                remoteClient.admin().indices().prepareRefresh().get();
+            }
+            // shardId 0 means to throw the Exception only on shard 0; all others should work
+            ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("index corrupted"), 0);
+            SearchRequest searchRequest = new SearchRequest();
+            searchRequest.source(new SearchSourceBuilder().query(queryBuilder).size(10).pointInTimeBuilder(new PointInTimeBuilder(pitId)));
+            SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get();
+
+            SearchResponse.Clusters clusters = searchResponse.getClusters();
+            int expectedNumClusters = 1 + (includeLocalIndex ?
1 : 0); + assertThat(clusters.getTotal(), equalTo(expectedNumClusters)); + assertThat(clusters.getSuccessful(), equalTo(expectedNumClusters)); + assertThat(clusters.getSkipped(), equalTo(0)); + + if (includeLocalIndex) { + AtomicReference localClusterRef = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + assertNotNull(localClusterRef); + assertOneFailedShard(localClusterRef.get(), numShards); + } + AtomicReference remoteClusterRef = clusters.getCluster(REMOTE_CLUSTER); + assertNotNull(remoteClusterRef); + assertOneFailedShard(remoteClusterRef.get(), numShards); + + } finally { + closePointInTime(pitId); + } + } + + private static void assertOneSuccessfulShard(SearchResponse.Cluster cluster) { + assertThat(cluster.getTotalShards(), equalTo(1)); + assertThat(cluster.getSuccessfulShards(), equalTo(1)); + assertThat(cluster.getFailedShards(), equalTo(0)); + assertThat(cluster.getFailures().size(), equalTo(0)); + assertThat(cluster.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); + assertThat(cluster.getTook().millis(), greaterThanOrEqualTo(0L)); + assertFalse(cluster.isTimedOut()); + } + + private static void assertOneFailedShard(SearchResponse.Cluster cluster, int totalShards) { + assertThat(cluster.getSuccessfulShards(), equalTo(totalShards - 1)); + assertThat(cluster.getFailedShards(), equalTo(1)); + assertThat(cluster.getFailures().size(), equalTo(1)); + assertThat(cluster.getFailures().get(0).reason(), containsString("index corrupted")); + assertThat(cluster.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL)); + assertThat(cluster.getTook().millis(), greaterThanOrEqualTo(0L)); + assertFalse(cluster.isTimedOut()); + } + private String openPointInTime(String[] indices, TimeValue keepAlive) { OpenPointInTimeRequest request = new OpenPointInTimeRequest(indices).keepAlive(keepAlive); final OpenPointInTimeResponse response = client().execute(OpenPointInTimeAction.INSTANCE, request).actionGet(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java index 3cd40347c8de7..df36f1babd364 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/SearchProgressActionListenerIT.java @@ -18,6 +18,7 @@ import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.TaskId; @@ -109,14 +110,15 @@ public void onListShards( List searchShards, List skippedShards, SearchResponse.Clusters clusters, - boolean fetchPhase + boolean fetchPhase, + TransportSearchAction.SearchTimeProvider timeProvider ) { shardsListener.set(searchShards); assertEquals(fetchPhase, hasFetchPhase); } @Override - public void onQueryResult(int shardIndex) { + public void onQueryResult(int shardIndex, QuerySearchResult result) { assertThat(shardIndex, lessThan(shardsListener.get().size())); numQueryResults.incrementAndGet(); } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java 
b/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java index 006a10e99489f..655a13c154338 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/search/TransportSearchIT.java @@ -11,6 +11,7 @@ import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.ScoreMode; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse; @@ -634,7 +635,7 @@ public long bytesToPreallocate() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index 8f8d982dd14c1..f561cc50b4f19 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -8,7 +8,6 @@ package org.elasticsearch.cluster; -import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -31,6 +30,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.collect.ImmutableOpenMap; @@ -227,7 +227,7 @@ private DiscoveryNode randomNode(String nodeId) { */ private ClusterState.Builder randomNodes(ClusterState clusterState) { DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); - Map transports = new HashMap<>(clusterState.transportVersions()); + Map versions = new HashMap<>(clusterState.compatibilityVersions()); List nodeIds = randomSubsetOf( randomInt(clusterState.nodes().getNodes().size() - 1), clusterState.nodes().getNodes().keySet().toArray(new String[0]) @@ -235,10 +235,10 @@ private ClusterState.Builder randomNodes(ClusterState clusterState) { for (String nodeId : nodeIds) { if (nodeId.startsWith("node-")) { nodes.remove(nodeId); - transports.remove(nodeId); + versions.remove(nodeId); if (randomBoolean()) { nodes.add(randomNode(nodeId)); - transports.put(nodeId, TransportVersionUtils.randomVersion(random())); + versions.put(nodeId, new CompatibilityVersions(TransportVersionUtils.randomVersion(random()))); } } } @@ -246,10 +246,10 @@ private ClusterState.Builder randomNodes(ClusterState clusterState) { for (int i = 0; i < additionalNodeCount; i++) { String id = "node-" + randomAlphaOfLength(10); nodes.add(randomNode(id)); - transports.put(id, TransportVersionUtils.randomVersion(random())); + versions.put(id, new CompatibilityVersions(TransportVersionUtils.randomVersion(random()))); } - return ClusterState.builder(clusterState).nodes(nodes).transportVersions(transports); + return ClusterState.builder(clusterState).nodes(nodes).compatibilityVersions(versions); } /** diff --git 
a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java index 09606ba3bbe43..86b1cdf315f55 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/index/mapper/DynamicMappingIT.java @@ -48,6 +48,7 @@ import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING; import static org.elasticsearch.index.mapper.MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING; +import static org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.MIN_DIMS_FOR_DYNAMIC_FLOAT_MAPPING; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertSearchHits; import static org.hamcrest.Matchers.containsString; @@ -662,4 +663,32 @@ public void testSubobjectsFalse() throws Exception { assertNotNull(properties.get("time.max")); }); } + + public void testKnnSubObject() throws Exception { + assertAcked(indicesAdmin().prepareCreate("test").setMapping(""" + { + "properties": { + "obj": { + "type": "object", + "dynamic": "true" + }, + "mapped_obj": { + "type": "object", + "dynamic": "true", + "properties": { + "vector": { + "type": "dense_vector" + } + } + } + } + }""").get()); + + client().index(new IndexRequest("test").source("mapped_obj.vector", Randomness.get().doubles(3, 0.0, 5.0).toArray())).get(); + + client().index( + new IndexRequest("test").source("obj.vector", Randomness.get().doubles(MIN_DIMS_FOR_DYNAMIC_FLOAT_MAPPING, 0.0, 5.0).toArray()) + ).get(); + + } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java index 8f2341ecded95..db5578ee6e60b 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/indices/IndexingMemoryControllerIT.java @@ -9,6 +9,7 @@ import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.index.IndexService; @@ -27,7 +28,7 @@ import java.util.Optional; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.lessThanOrEqualTo; public class IndexingMemoryControllerIT extends ESSingleNodeTestCase { @@ -104,7 +105,6 @@ public void testDeletesAloneCanTriggerRefresh() throws Exception { for (int i = 0; i < 100; i++) { client().prepareDelete("index", Integer.toString(i)).get(); } - // need to assert busily as IndexingMemoryController refreshes in background - assertBusy(() -> assertThat(shard.refreshStats().getTotal(), greaterThan(refreshStats.getTotal() + 1))); + assertThat(shard.getEngineOrNull().getIndexBufferRAMBytesUsed(), lessThanOrEqualTo(ByteSizeUnit.KB.toBytes(1))); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java b/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java index 4323eea1c66fc..ab24bf923b9db 100644 
--- a/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/readiness/ReadinessClusterIT.java @@ -21,23 +21,28 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.core.Tuple; import org.elasticsearch.discovery.MasterNotDiscoveredException; +import org.elasticsearch.plugins.Plugin; import org.elasticsearch.reservedstate.action.ReservedClusterSettingsAction; import org.elasticsearch.reservedstate.service.FileSettingsService; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.InternalTestCluster; -import org.elasticsearch.test.readiness.ReadinessClientProbe; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.StandardCopyOption; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import static org.elasticsearch.node.Node.INITIAL_STATE_TIMEOUT_SETTING; +import static org.elasticsearch.readiness.MockReadinessService.tcpReadinessProbeFalse; +import static org.elasticsearch.readiness.MockReadinessService.tcpReadinessProbeTrue; import static org.elasticsearch.test.NodeRoles.dataOnlyNode; import static org.elasticsearch.test.NodeRoles.masterNode; import static org.elasticsearch.test.NodeRoles.nonDataNode; @@ -48,7 +53,7 @@ import static org.hamcrest.Matchers.notNullValue; @ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, autoManageMasterNodes = false) -public class ReadinessClusterIT extends ESIntegTestCase implements ReadinessClientProbe { +public class ReadinessClusterIT extends ESIntegTestCase { private static AtomicLong versionCounter = new AtomicLong(1); @@ -86,6 +91,13 @@ protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { return settings.build(); } + @Override + protected Collection> getMockPlugins() { + final List> plugins = new ArrayList<>(super.getMockPlugins()); + plugins.add(MockReadinessService.TestPlugin.class); + return Collections.unmodifiableList(plugins); + } + private void assertMasterNode(Client client, String node) { assertThat( client.admin().cluster().prepareState().execute().actionGet().getState().nodes().getMasterNode().getName(), @@ -115,11 +127,7 @@ public void testReadinessDuringRestarts() throws Exception { tcpReadinessProbeTrue(internalCluster().getInstance(ReadinessService.class, dataNode)); tcpReadinessProbeTrue(internalCluster().getInstance(ReadinessService.class, masterNode)); - Integer masterPort = internalCluster().getInstance(ReadinessService.class, internalCluster().getMasterName()) - .boundAddress() - .publishAddress() - .getPort(); - + final var masterReadinessService = internalCluster().getInstance(ReadinessService.class, masterNode); assertMasterNode(internalCluster().nonMasterClient(), masterNode); logger.info("--> stop master node"); @@ -127,7 +135,7 @@ public void testReadinessDuringRestarts() throws Exception { internalCluster().stopCurrentMasterNode(); expectMasterNotFound(); - tcpReadinessProbeFalse(masterPort); + tcpReadinessProbeFalse(masterReadinessService); logger.info("--> start previous master node again"); final String nextMasterEligibleNodeName = internalCluster().startNode( @@ -154,8 +162,7 @@ public void testReadinessDuringRestartsNormalOrder() throws 
Exception { tcpReadinessProbeTrue(internalCluster().getInstance(ReadinessService.class, masterNode)); for (String dataNode : dataNodes) { - ReadinessService s = internalCluster().getInstance(ReadinessService.class, dataNode); - tcpReadinessProbeTrue(s); + tcpReadinessProbeTrue(internalCluster().getInstance(ReadinessService.class, dataNode)); } logger.info("--> restart data node 1"); @@ -192,8 +199,7 @@ public Settings onNodeStopped(String nodeName) throws Exception { ensureGreen(); for (String dataNode : dataNodes) { - ReadinessService s = internalCluster().getInstance(ReadinessService.class, dataNode); - tcpReadinessProbeTrue(s); + tcpReadinessProbeTrue(internalCluster().getInstance(ReadinessService.class, dataNode)); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java index 40ec70511e44e..8a13ce9988835 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/SearchCancellationIT.java @@ -221,6 +221,17 @@ public void testCancelMultiSearch() throws Exception { } } + /** + * The test `testCancelFailedSearchWhenPartialResultDisallowed` usually fails when concurrency is enabled unless + * the `cancelledLatch.await()` section is commented out. However, this approach seems prone to race conditions. + * Further investigation is needed to determine if this test just needs to be revised, or rather, if it is + * detecting a deeper issue. For now, we will disable concurrency here. + */ + @Override + protected boolean enableConcurrentSearch() { + return false; + } + public void testCancelFailedSearchWhenPartialResultDisallowed() throws Exception { final List plugins = initBlockFactory(); int numberOfShards = between(2, 5); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java index f3173b291671b..c8d89785fc4af 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/SignificantTermsSignificanceScoreIT.java @@ -239,6 +239,11 @@ public void testPopularTermManyDeletedDocs() throws Exception { } + @Override + protected boolean enableConcurrentSearch() { + return false; + } + public void testBackgroundVsSeparateSet() throws Exception { String type = randomBoolean() ? "text" : "long"; String settings = "{\"index.number_of_shards\": 1, \"index.number_of_replicas\": 0}"; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java index 575a8269a87eb..85cc185d1f558 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/aggregations/bucket/TermsShardMinDocCountIT.java @@ -35,6 +35,11 @@ private static String randomExecutionHint() { return randomBoolean() ? 
null : randomFrom(SignificantTermsAggregatorFactory.ExecutionMode.values()).toString(); } + @Override + protected boolean enableConcurrentSearch() { + return false; + } + // see https://github.com/elastic/elasticsearch/issues/5998 public void testShardMinDocCountSignificantTermsTest() throws Exception { String textMappings; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java index 7978b854255c7..ccc7d6bc5009c 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/ccs/CrossClusterSearchIT.java @@ -11,33 +11,34 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.MatchAllQueryBuilder; import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.search.builder.SearchSourceBuilder; +import org.elasticsearch.search.query.SlowRunningQueryBuilder; import org.elasticsearch.search.query.ThrowingQueryBuilder; import org.elasticsearch.test.AbstractMultiClustersTestCase; import org.elasticsearch.test.InternalTestCluster; import org.elasticsearch.transport.RemoteClusterAware; import org.elasticsearch.transport.RemoteTransportException; -import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; -import java.util.stream.Collectors; -import java.util.stream.Stream; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.hamcrest.Matchers.containsString; @@ -69,8 +70,7 @@ protected boolean reuseClusters() { @Override protected Collection> nodePlugins(String clusterAlias) { - List> plugs = Arrays.asList(TestQueryBuilderPlugin.class); - return Stream.concat(super.nodePlugins(clusterAlias).stream(), plugs.stream()).collect(Collectors.toList()); + return CollectionUtils.appendToCopy(super.nodePlugins(clusterAlias), CrossClusterSearchIT.TestQueryBuilderPlugin.class); } public static class TestQueryBuilderPlugin extends Plugin implements SearchPlugin { @@ -78,11 +78,18 @@ public TestQueryBuilderPlugin() {} @Override public List> getQueries() { + QuerySpec slowRunningSpec = new QuerySpec<>( + SlowRunningQueryBuilder.NAME, + SlowRunningQueryBuilder::new, + p -> { + throw new IllegalStateException("not implemented"); + } + ); QuerySpec throwingSpec = new QuerySpec<>(ThrowingQueryBuilder.NAME, ThrowingQueryBuilder::new, p -> { throw new IllegalStateException("not implemented"); }); - return List.of(throwingSpec); + return List.of(slowRunningSpec, 
throwingSpec); } } @@ -93,18 +100,26 @@ public void testClusterDetailsAfterSuccessfulCCS() throws Exception { int localNumShards = (Integer) testClusterInfo.get("local.num_shards"); int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); - PlainActionFuture queryFuture = new PlainActionFuture<>(); SearchRequest searchRequest = new SearchRequest(localIndex, REMOTE_CLUSTER + ":" + remoteIndex); + if (randomBoolean()) { + searchRequest = searchRequest.scroll("1m"); + } searchRequest.allowPartialSearchResults(false); - boolean minimizeRoundtrips = true; // TODO: support MRT=false + if (randomBoolean()) { + searchRequest.setBatchedReduceSize(randomIntBetween(3, 20)); + } + boolean minimizeRoundtrips = randomBoolean(); searchRequest.setCcsMinimizeRoundtrips(minimizeRoundtrips); + boolean dfs = randomBoolean(); + if (dfs) { + searchRequest.searchType(SearchType.DFS_QUERY_THEN_FETCH); + } + if (randomBoolean()) { + searchRequest.setPreFilterShardSize(1); + } + searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); - searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(1000)); - client(LOCAL_CLUSTER).search(searchRequest, queryFuture); - - assertBusy(() -> assertTrue(queryFuture.isDone())); - - SearchResponse searchResponse = queryFuture.get(); + SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); assertNotNull(searchResponse); SearchResponse.Clusters clusters = searchResponse.getClusters(); @@ -145,20 +160,27 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except int localNumShards = (Integer) testClusterInfo.get("local.num_shards"); int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); - PlainActionFuture queryFuture = new PlainActionFuture<>(); SearchRequest searchRequest = new SearchRequest(localIndex, REMOTE_CLUSTER + ":" + remoteIndex); searchRequest.allowPartialSearchResults(false); - boolean minimizeRoundtrips = true; // TODO support MRT=false + boolean minimizeRoundtrips = randomBoolean(); searchRequest.setCcsMinimizeRoundtrips(minimizeRoundtrips); + if (randomBoolean()) { + searchRequest.setBatchedReduceSize(randomIntBetween(3, 20)); + } + boolean dfs = randomBoolean(); + if (dfs) { + searchRequest.searchType(SearchType.DFS_QUERY_THEN_FETCH); + } + if (randomBoolean()) { + searchRequest.setPreFilterShardSize(1); + } + searchRequest.setPreFilterShardSize(1); RangeQueryBuilder rangeQueryBuilder = new RangeQueryBuilder("@timestamp").from(EARLIEST_TIMESTAMP - 2000) .to(EARLIEST_TIMESTAMP - 1000); - searchRequest.source(new SearchSourceBuilder().query(rangeQueryBuilder).size(1000)); - client(LOCAL_CLUSTER).search(searchRequest, queryFuture); - - assertBusy(() -> assertTrue(queryFuture.isDone())); + searchRequest.source(new SearchSourceBuilder().query(rangeQueryBuilder).size(10)); - SearchResponse searchResponse = queryFuture.get(); + SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); assertNotNull(searchResponse); SearchResponse.Clusters clusters = searchResponse.getClusters(); @@ -175,7 +197,12 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSkippedShards(), 
equalTo(0)); + if (dfs) { + // with DFS_QUERY_THEN_FETCH, the local shards are never skipped + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + } else { + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1)); + } assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); assertThat(localClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); @@ -183,7 +210,11 @@ public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Except assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + if (clusters.isCcsMinimizeRoundtrips()) { + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1)); + } else { + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); + } assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); @@ -196,20 +227,25 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneShardOnly() throws Except int localNumShards = (Integer) testClusterInfo.get("local.num_shards"); int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); - PlainActionFuture queryFuture = new PlainActionFuture<>(); SearchRequest searchRequest = new SearchRequest(localIndex, REMOTE_CLUSTER + ":" + remoteIndex); searchRequest.allowPartialSearchResults(true); - boolean minimizeRoundtrips = true; // TODO support MRT=false + boolean minimizeRoundtrips = randomBoolean(); searchRequest.setCcsMinimizeRoundtrips(minimizeRoundtrips); - + if (randomBoolean()) { + searchRequest.setBatchedReduceSize(randomIntBetween(3, 20)); + } + boolean dfs = randomBoolean(); + if (dfs) { + searchRequest.searchType(SearchType.DFS_QUERY_THEN_FETCH); + } + if (randomBoolean()) { + searchRequest.setPreFilterShardSize(1); + } // shardId 0 means to throw the Exception only on shard 0; all others should work ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("index corrupted"), 0); searchRequest.source(new SearchSourceBuilder().query(queryBuilder).size(10)); - client(LOCAL_CLUSTER).search(searchRequest, queryFuture); - - assertBusy(() -> assertTrue(queryFuture.isDone())); - SearchResponse searchResponse = queryFuture.get(); + SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); assertNotNull(searchResponse); SearchResponse.Clusters clusters = searchResponse.getClusters(); @@ -219,27 +255,11 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneShardOnly() throws Except SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY).get(); assertNotNull(localClusterSearchInfo); - assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL)); - assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); - assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards - 1)); - assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(localClusterSearchInfo.getFailedShards(), 
equalTo(1)); - assertThat(localClusterSearchInfo.getFailures().size(), equalTo(1)); - assertThat(localClusterSearchInfo.getTook().millis(), greaterThan(0L)); - ShardSearchFailure localShardSearchFailure = localClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", localShardSearchFailure.reason().contains("index corrupted")); + assertOneFailedShard(localClusterSearchInfo, localNumShards); SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER).get(); assertNotNull(remoteClusterSearchInfo); - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL)); - assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards - 1)); - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(1)); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); - assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); - ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + assertOneFailedShard(remoteClusterSearchInfo, remoteNumShards); } public void testClusterDetailsAfterCCSWithFailuresOnRemoteClusterOnly() throws Exception { @@ -247,13 +267,23 @@ public void testClusterDetailsAfterCCSWithFailuresOnRemoteClusterOnly() throws E String localIndex = (String) testClusterInfo.get("local.index"); String remoteIndex = (String) testClusterInfo.get("remote.index"); int localNumShards = (Integer) testClusterInfo.get("local.num_shards"); + int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); boolean skipUnavailable = (Boolean) testClusterInfo.get("remote.skip_unavailable"); PlainActionFuture queryFuture = new PlainActionFuture<>(); SearchRequest searchRequest = new SearchRequest(localIndex, REMOTE_CLUSTER + ":" + remoteIndex); searchRequest.allowPartialSearchResults(true); - boolean minimizeRoundtrips = true; // TODO support MRT=false - searchRequest.setCcsMinimizeRoundtrips(minimizeRoundtrips); + searchRequest.setCcsMinimizeRoundtrips(randomBoolean()); + if (randomBoolean()) { + searchRequest.setBatchedReduceSize(randomIntBetween(3, 20)); + } + boolean dfs = randomBoolean(); + if (dfs) { + searchRequest.searchType(SearchType.DFS_QUERY_THEN_FETCH); + } + if (randomBoolean()) { + searchRequest.setPreFilterShardSize(1); + } // throw Exception on all shards of remoteIndex, but not against localIndex ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder( @@ -262,11 +292,14 @@ public void testClusterDetailsAfterCCSWithFailuresOnRemoteClusterOnly() throws E remoteIndex ); searchRequest.source(new SearchSourceBuilder().query(queryBuilder).size(10)); - client(LOCAL_CLUSTER).search(searchRequest, queryFuture); + boolean minimizeRoundtrips = TransportSearchAction.shouldMinimizeRoundtrips(searchRequest); + + client(LOCAL_CLUSTER).search(searchRequest, queryFuture); assertBusy(() -> assertTrue(queryFuture.isDone())); - if (skipUnavailable == false) { + // dfs=true overrides the minimize_roundtrips=true setting and does not minimize roundtrips + if (skipUnavailable == false && minimizeRoundtrips && dfs == false) { ExecutionException ee = expectThrows(ExecutionException.class, () -> queryFuture.get()); assertNotNull(ee.getCause()); 
assertThat(ee.getCause(), instanceOf(RemoteTransportException.class)); @@ -277,7 +310,9 @@ public void testClusterDetailsAfterCCSWithFailuresOnRemoteClusterOnly() throws E assertNotNull(searchResponse); SearchResponse.Clusters clusters = searchResponse.getClusters(); - assertThat(clusters.isCcsMinimizeRoundtrips(), equalTo(minimizeRoundtrips)); + if (dfs == false) { + assertThat(clusters.isCcsMinimizeRoundtrips(), equalTo(minimizeRoundtrips)); + } assertThat(clusters.getTotal(), equalTo(2)); assertThat(clusters.getSuccessful(), equalTo(1)); assertThat(clusters.getSkipped(), equalTo(1)); @@ -299,11 +334,19 @@ public void testClusterDetailsAfterCCSWithFailuresOnRemoteClusterOnly() throws E ? SearchResponse.Cluster.Status.SKIPPED : SearchResponse.Cluster.Status.FAILED; assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus)); - assertNull(remoteClusterSearchInfo.getTotalShards()); - assertNull(remoteClusterSearchInfo.getSuccessfulShards()); - assertNull(remoteClusterSearchInfo.getSkippedShards()); - assertNull(remoteClusterSearchInfo.getFailedShards()); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); + if (clusters.isCcsMinimizeRoundtrips()) { + assertNull(remoteClusterSearchInfo.getTotalShards()); + assertNull(remoteClusterSearchInfo.getSuccessfulShards()); + assertNull(remoteClusterSearchInfo.getSkippedShards()); + assertNull(remoteClusterSearchInfo.getFailedShards()); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); + } else { + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(remoteNumShards)); + } assertNull(remoteClusterSearchInfo.getTook()); assertFalse(remoteClusterSearchInfo.isTimedOut()); ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); @@ -311,22 +354,86 @@ public void testClusterDetailsAfterCCSWithFailuresOnRemoteClusterOnly() throws E } } + public void testCCSWithSearchTimeoutOnRemoteCluster() throws Exception { + Map testClusterInfo = setupTwoClusters(); + String localIndex = (String) testClusterInfo.get("local.index"); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + int localNumShards = (Integer) testClusterInfo.get("local.num_shards"); + int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); + + SearchRequest searchRequest = new SearchRequest(localIndex, REMOTE_CLUSTER + ":" + remoteIndex); + searchRequest.allowPartialSearchResults(true); + searchRequest.setCcsMinimizeRoundtrips(randomBoolean()); + + if (randomBoolean()) { + searchRequest.setBatchedReduceSize(randomIntBetween(3, 20)); + } + if (randomBoolean()) { + searchRequest.searchType(SearchType.DFS_QUERY_THEN_FETCH); + } + if (randomBoolean()) { + searchRequest.setPreFilterShardSize(1); + } + + TimeValue searchTimeout = new TimeValue(100, TimeUnit.MILLISECONDS); + // query builder that will sleep for the specified amount of time in the query phase + SlowRunningQueryBuilder slowRunningQueryBuilder = new SlowRunningQueryBuilder(searchTimeout.millis() * 5); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(slowRunningQueryBuilder).timeout(searchTimeout); + searchRequest.source(sourceBuilder); + + SearchResponse searchResponse = 
client(LOCAL_CLUSTER).search(searchRequest).get(); + assertNotNull(searchResponse); + + SearchResponse.Clusters clusters = searchResponse.getClusters(); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getSuccessful(), equalTo(2)); + assertThat(clusters.getSkipped(), equalTo(0)); + + SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY).get(); + assertNotNull(localClusterSearchInfo); + assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL)); + assertTrue(localClusterSearchInfo.isTimedOut()); + assertThat(localClusterSearchInfo.getIndexExpression(), equalTo(localIndex)); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + + SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER).get(); + assertNotNull(remoteClusterSearchInfo); + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL)); + assertTrue(remoteClusterSearchInfo.isTimedOut()); + assertThat(remoteClusterSearchInfo.getIndexExpression(), equalTo(remoteIndex)); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + } + public void testRemoteClusterOnlyCCSSuccessfulResult() throws Exception { Map testClusterInfo = setupTwoClusters(); String remoteIndex = (String) testClusterInfo.get("remote.index"); int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); - PlainActionFuture queryFuture = new PlainActionFuture<>(); SearchRequest searchRequest = new SearchRequest(REMOTE_CLUSTER + ":" + remoteIndex); searchRequest.allowPartialSearchResults(false); - boolean minimizeRoundtrips = true; // TODO support MRT=false - searchRequest.setCcsMinimizeRoundtrips(minimizeRoundtrips); - searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(1000)); - client(LOCAL_CLUSTER).search(searchRequest, queryFuture); - - assertBusy(() -> assertTrue(queryFuture.isDone())); + searchRequest.setCcsMinimizeRoundtrips(randomBoolean()); + if (randomBoolean()) { + searchRequest.setBatchedReduceSize(randomIntBetween(3, 20)); + } + if (randomBoolean()) { + searchRequest.searchType(SearchType.DFS_QUERY_THEN_FETCH); + } + if (randomBoolean()) { + searchRequest.setPreFilterShardSize(1); + } + searchRequest.source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); - SearchResponse searchResponse = queryFuture.get(); + SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); assertNotNull(searchResponse); SearchResponse.Clusters clusters = searchResponse.getClusters(); @@ -353,20 +460,24 @@ public void testRemoteClusterOnlyCCSWithFailuresOnOneShardOnly() throws Exceptio String remoteIndex = (String) 
testClusterInfo.get("remote.index"); int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); - PlainActionFuture queryFuture = new PlainActionFuture<>(); SearchRequest searchRequest = new SearchRequest(REMOTE_CLUSTER + ":" + remoteIndex); searchRequest.allowPartialSearchResults(true); - boolean minimizeRoundtrips = true; // TODO support MRT=false - searchRequest.setCcsMinimizeRoundtrips(minimizeRoundtrips); + searchRequest.setCcsMinimizeRoundtrips(randomBoolean()); + if (randomBoolean()) { + searchRequest.setBatchedReduceSize(randomIntBetween(3, 20)); + } + if (randomBoolean()) { + searchRequest.searchType(SearchType.DFS_QUERY_THEN_FETCH); + } + if (randomBoolean()) { + searchRequest.setPreFilterShardSize(1); + } // shardId 0 means to throw the Exception only on shard 0; all others should work ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("index corrupted"), 0); searchRequest.source(new SearchSourceBuilder().query(queryBuilder).size(10)); - client(LOCAL_CLUSTER).search(searchRequest, queryFuture); - - assertBusy(() -> assertTrue(queryFuture.isDone())); - SearchResponse searchResponse = queryFuture.get(); + SearchResponse searchResponse = client(LOCAL_CLUSTER).search(searchRequest).get(); assertNotNull(searchResponse); SearchResponse.Clusters clusters = searchResponse.getClusters(); @@ -378,37 +489,38 @@ public void testRemoteClusterOnlyCCSWithFailuresOnOneShardOnly() throws Exceptio SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER).get(); assertNotNull(remoteClusterSearchInfo); - assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL)); - assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards - 1)); - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(1)); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); - assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThan(0L)); - ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + assertOneFailedShard(remoteClusterSearchInfo, remoteNumShards); } public void testRemoteClusterOnlyCCSWithFailuresOnAllShards() throws Exception { Map testClusterInfo = setupTwoClusters(); String remoteIndex = (String) testClusterInfo.get("remote.index"); - int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); boolean skipUnavailable = (Boolean) testClusterInfo.get("remote.skip_unavailable"); PlainActionFuture queryFuture = new PlainActionFuture<>(); SearchRequest searchRequest = new SearchRequest(REMOTE_CLUSTER + ":" + remoteIndex); searchRequest.allowPartialSearchResults(true); - boolean minimizeRoundtrips = true; // TODO support MRT=false - searchRequest.setCcsMinimizeRoundtrips(minimizeRoundtrips); + searchRequest.setCcsMinimizeRoundtrips(randomBoolean()); + if (randomBoolean()) { + searchRequest.setBatchedReduceSize(randomIntBetween(3, 20)); + } + if (randomBoolean()) { + searchRequest.searchType(SearchType.DFS_QUERY_THEN_FETCH); + } + if (randomBoolean()) { + searchRequest.setPreFilterShardSize(1); + } // shardId -1 means to throw the Exception on all shards, so should result in complete search failure ThrowingQueryBuilder 
queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("index corrupted"), -1); searchRequest.source(new SearchSourceBuilder().query(queryBuilder).size(10)); - client(LOCAL_CLUSTER).search(searchRequest, queryFuture); + boolean minimizeRoundtrips = TransportSearchAction.shouldMinimizeRoundtrips(searchRequest); + + client(LOCAL_CLUSTER).search(searchRequest, queryFuture); assertBusy(() -> assertTrue(queryFuture.isDone())); - if (skipUnavailable == false) { + if (skipUnavailable == false || minimizeRoundtrips == false) { ExecutionException ee = expectThrows(ExecutionException.class, () -> queryFuture.get()); assertNotNull(ee.getCause()); Throwable rootCause = ExceptionsHelper.unwrap(ee, IllegalStateException.class); @@ -429,19 +541,11 @@ public void testRemoteClusterOnlyCCSWithFailuresOnAllShards() throws Exception { ? SearchResponse.Cluster.Status.SKIPPED : SearchResponse.Cluster.Status.FAILED; assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus)); - if (clusters.isCcsMinimizeRoundtrips()) { - assertNull(remoteClusterSearchInfo.getTotalShards()); - assertNull(remoteClusterSearchInfo.getSuccessfulShards()); - assertNull(remoteClusterSearchInfo.getSkippedShards()); - assertNull(remoteClusterSearchInfo.getFailedShards()); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); - } else { - assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); - assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(remoteNumShards)); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(remoteNumShards)); - } + assertNull(remoteClusterSearchInfo.getTotalShards()); + assertNull(remoteClusterSearchInfo.getSuccessfulShards()); + assertNull(remoteClusterSearchInfo.getSkippedShards()); + assertNull(remoteClusterSearchInfo.getFailedShards()); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); assertNull(remoteClusterSearchInfo.getTook()); assertFalse(remoteClusterSearchInfo.isTimedOut()); ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); @@ -449,10 +553,23 @@ public void testRemoteClusterOnlyCCSWithFailuresOnAllShards() throws Exception { } } + private static void assertOneFailedShard(SearchResponse.Cluster cluster, int totalShards) { + assertNotNull(cluster); + assertThat(cluster.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL)); + assertThat(cluster.getTotalShards(), equalTo(totalShards)); + assertThat(cluster.getSuccessfulShards(), equalTo(totalShards - 1)); + assertThat(cluster.getSkippedShards(), equalTo(0)); + assertThat(cluster.getFailedShards(), equalTo(1)); + assertThat(cluster.getFailures().size(), equalTo(1)); + assertThat(cluster.getTook().millis(), greaterThan(0L)); + ShardSearchFailure remoteShardSearchFailure = cluster.getFailures().get(0); + assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + } + private Map setupTwoClusters() { String localIndex = "demo"; - int numShardsLocal = randomIntBetween(3, 6); - Settings localSettings = indexSettings(numShardsLocal, 0).build(); + int numShardsLocal = randomIntBetween(2, 10); + Settings localSettings = indexSettings(numShardsLocal, randomIntBetween(0, 1)).build(); assertAcked( client(LOCAL_CLUSTER).admin() .indices() @@ -463,17 +580,14 @@ private Map setupTwoClusters() { 
indexDocs(client(LOCAL_CLUSTER), localIndex); String remoteIndex = "prod"; - int numShardsRemote = randomIntBetween(3, 6); + int numShardsRemote = randomIntBetween(2, 10); final InternalTestCluster remoteCluster = cluster(REMOTE_CLUSTER); remoteCluster.ensureAtLeastNumDataNodes(randomIntBetween(1, 3)); - final Settings.Builder remoteSettings = Settings.builder(); - remoteSettings.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShardsRemote); - assertAcked( client(REMOTE_CLUSTER).admin() .indices() .prepareCreate(remoteIndex) - .setSettings(Settings.builder().put(remoteSettings.build()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) + .setSettings(indexSettings(numShardsRemote, randomIntBetween(0, 1))) .setMapping("@timestamp", "type=date", "f", "type=text") ); assertFalse( @@ -503,7 +617,7 @@ private Map setupTwoClusters() { } private int indexDocs(Client client, String index) { - int numDocs = between(50, 100); + int numDocs = between(500, 1200); for (int i = 0; i < numDocs; i++) { long ts = EARLIEST_TIMESTAMP + i; if (i == numDocs - 1) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java index aa769fa565a87..08697bc1470fb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/ExplainableScriptIT.java @@ -16,7 +16,6 @@ import org.elasticsearch.action.search.SearchType; import org.elasticsearch.common.lucene.search.function.CombineFunction; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.fielddata.ScriptDocValues; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.ScriptPlugin; import org.elasticsearch.script.DocReader; @@ -43,7 +42,6 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.ExecutionException; import static org.elasticsearch.index.query.QueryBuilders.functionScoreQuery; import static org.elasticsearch.index.query.QueryBuilders.termQuery; @@ -76,7 +74,7 @@ public boolean needs_score() { } @Override - public ScoreScript newInstance(DocReader docReader) throws IOException { + public ScoreScript newInstance(DocReader docReader) { return new MyScript(params1, lookup, ((DocValuesDocReader) docReader).getLeafReaderContext()); } }; @@ -99,13 +97,13 @@ static class MyScript extends ScoreScript implements ExplainableScoreScript { @Override public Explanation explain(Explanation subQueryScore) throws IOException { - Explanation scoreExp = Explanation.match(subQueryScore.getValue(), "_score: ", subQueryScore); - return Explanation.match((float) (execute(null)), "This script returned " + execute(null), scoreExp); + double score = execute(null); + return Explanation.match((float) score, "This script returned " + score); } @Override public double execute(ExplanationHolder explanation) { - return ((Number) ((ScriptDocValues) getDoc().get("number_field")).get(0)).doubleValue(); + return ((Number) (getDoc().get("number_field")).get(0)).doubleValue(); } } @@ -114,7 +112,7 @@ protected Collection> nodePlugins() { return Arrays.asList(ExplainableScriptPlugin.class); } - public void testExplainScript() throws InterruptedException, IOException, ExecutionException { + public void testExplainScript() throws InterruptedException, IOException { List indexRequests = new ArrayList<>(); for (int i = 0; i 
< 20; i++) { indexRequests.add( @@ -146,8 +144,6 @@ public void testExplainScript() throws InterruptedException, IOException, Execut for (SearchHit hit : hits.getHits()) { assertThat(hit.getId(), equalTo(Integer.toString(idCounter))); assertThat(hit.getExplanation().toString(), containsString(Double.toString(idCounter))); - assertThat(hit.getExplanation().toString(), containsString("1 = n")); - assertThat(hit.getExplanation().toString(), containsString("1 = N")); assertThat(hit.getExplanation().getDetails().length, equalTo(2)); idCounter--; } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java index 1794ea8fbfab9..5c9c54a0d3b19 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/functionscore/FunctionScorePluginIT.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.Explanation; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.search.SearchRequest; @@ -137,7 +138,7 @@ public DecayFunction getDecayFunction() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } private static final DecayFunction decayFunction = new LinearMultScoreFunction(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java b/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java index cc3d21efd71bc..c8a12b7a90e30 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/search/msearch/MultiSearchIT.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.msearch; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.search.MultiSearchRequest; import org.elasticsearch.action.search.MultiSearchResponse; import org.elasticsearch.common.settings.Settings; @@ -87,7 +88,7 @@ public void testSimpleMultiSearchMoreRequests() { * Test that triggering the CCS compatibility check with a query that shouldn't go to the minor before Version.CURRENT works */ public void testCCSCheckCompatibility() throws Exception { - TransportVersion transportVersion = TransportVersionUtils.getNextVersion(TransportVersion.MINIMUM_CCS_VERSION, true); + TransportVersion transportVersion = TransportVersionUtils.getNextVersion(TransportVersions.MINIMUM_CCS_VERSION, true); createIndex("test"); ensureGreen(); client().prepareIndex("test").setId("1").setSource("field", "xxx").get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java index cc425a8bb0fb7..11ab81d2591b1 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java @@ -62,6 +62,7 @@ import java.util.Optional; import java.util.Set; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; import 
java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -212,6 +213,7 @@ private static class TrackedCluster { // a single thread for "client" activities, to limit the number of activities all starting at once new ScalingExecutorBuilder(CLIENT, 1, 1, TimeValue.ZERO, true, CLIENT) ); + private final Executor clientExecutor = threadPool.executor(CLIENT); private final AtomicBoolean shouldStop = new AtomicBoolean(); private final InternalTestCluster cluster; @@ -393,7 +395,7 @@ private void enqueueAction(final CheckedRunnable action) { return; } - threadPool.scheduleUnlessShuttingDown(TimeValue.timeValueMillis(between(1, 500)), CLIENT, mustSucceed(action)); + threadPool.scheduleUnlessShuttingDown(TimeValue.timeValueMillis(between(1, 500)), clientExecutor, mustSucceed(action)); } private void startRestorer() { @@ -1068,27 +1070,26 @@ private void pollForSnapshotCompletion( Releasable onCompletion, Runnable onSuccess ) { - threadPool.executor(CLIENT) - .execute( - mustSucceed( - () -> client.admin() - .cluster() - .prepareGetSnapshots(repositoryName) - .setCurrentSnapshot() - .execute(mustSucceed(getSnapshotsResponse -> { - if (getSnapshotsResponse.getSnapshots() - .stream() - .noneMatch(snapshotInfo -> snapshotInfo.snapshotId().getName().equals(snapshotName))) { - - logger.info("--> snapshot [{}:{}] no longer running", repositoryName, snapshotName); - Releasables.close(onCompletion); - onSuccess.run(); - } else { - pollForSnapshotCompletion(client, repositoryName, snapshotName, onCompletion, onSuccess); - } - })) - ) - ); + clientExecutor.execute( + mustSucceed( + () -> client.admin() + .cluster() + .prepareGetSnapshots(repositoryName) + .setCurrentSnapshot() + .execute(mustSucceed(getSnapshotsResponse -> { + if (getSnapshotsResponse.getSnapshots() + .stream() + .noneMatch(snapshotInfo -> snapshotInfo.snapshotId().getName().equals(snapshotName))) { + + logger.info("--> snapshot [{}:{}] no longer running", repositoryName, snapshotName); + Releasables.close(onCompletion); + onSuccess.run(); + } else { + pollForSnapshotCompletion(client, repositoryName, snapshotName, onCompletion, onSuccess); + } + })) + ) + ); } private void startNodeRestarter() { diff --git a/server/src/main/java/module-info.java b/server/src/main/java/module-info.java index 2d4337358b32e..1395aae41e2af 100644 --- a/server/src/main/java/module-info.java +++ b/server/src/main/java/module-info.java @@ -170,6 +170,7 @@ exports org.elasticsearch.cluster.routing.allocation.command; exports org.elasticsearch.cluster.routing.allocation.decider; exports org.elasticsearch.cluster.service; + exports org.elasticsearch.cluster.version; exports org.elasticsearch.common; exports org.elasticsearch.common.blobstore; exports org.elasticsearch.common.blobstore.fs; diff --git a/server/src/main/java/org/elasticsearch/Build.java b/server/src/main/java/org/elasticsearch/Build.java index 9a7ea5fb79455..acb61e5fb46b6 100644 --- a/server/src/main/java/org/elasticsearch/Build.java +++ b/server/src/main/java/org/elasticsearch/Build.java @@ -191,7 +191,8 @@ static URL getElasticsearchCodeSourceLocation() { public static Build readBuild(StreamInput in) throws IOException { final String flavor; - if (in.getTransportVersion().before(TransportVersion.V_8_3_0) || in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_039)) { + if (in.getTransportVersion().before(TransportVersions.V_8_3_0) + || in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_039)) { flavor = in.readString(); } 
else { flavor = "default"; @@ -205,7 +206,7 @@ public static Build readBuild(StreamInput in) throws IOException { final String minWireVersion; final String minIndexVersion; final String displayString; - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_041)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_041)) { minWireVersion = in.readString(); minIndexVersion = in.readString(); displayString = in.readString(); @@ -221,8 +222,8 @@ public static Build readBuild(StreamInput in) throws IOException { } public static void writeBuild(Build build, StreamOutput out) throws IOException { - if (out.getTransportVersion().before(TransportVersion.V_8_3_0) - || out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_039)) { + if (out.getTransportVersion().before(TransportVersions.V_8_3_0) + || out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_039)) { out.writeString(build.flavor()); } out.writeString(build.type().displayName()); @@ -230,7 +231,7 @@ public static void writeBuild(Build build, StreamOutput out) throws IOException out.writeString(build.date()); out.writeBoolean(build.isSnapshot()); out.writeString(build.qualifiedVersion()); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_041)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_041)) { out.writeString(build.minWireCompatVersion()); out.writeString(build.minIndexCompatVersion()); out.writeString(build.displayString()); diff --git a/server/src/main/java/org/elasticsearch/ElasticsearchException.java b/server/src/main/java/org/elasticsearch/ElasticsearchException.java index 1de59edca8bdb..a333f62593dc2 100644 --- a/server/src/main/java/org/elasticsearch/ElasticsearchException.java +++ b/server/src/main/java/org/elasticsearch/ElasticsearchException.java @@ -72,7 +72,7 @@ */ public class ElasticsearchException extends RuntimeException implements ToXContentFragment, Writeable { - private static final TransportVersion UNKNOWN_VERSION_ADDED = TransportVersion.ZERO; + private static final TransportVersion UNKNOWN_VERSION_ADDED = TransportVersions.ZERO; /** * Passed in the {@link Params} of {@link #generateThrowableXContent(XContentBuilder, Params, Throwable)} @@ -298,14 +298,14 @@ private static Writer createNestingFunction(int thisLevel, Runnable n protected void writeTo(StreamOutput out, Writer nestedExceptionsWriter) throws IOException { out.writeOptionalString(this.getMessage()); nestedExceptionsWriter.write(out, this); - out.writeMapOfLists(headers, StreamOutput::writeString, StreamOutput::writeString); - out.writeMapOfLists(metadata, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(headers, StreamOutput::writeStringCollection); + out.writeMap(metadata, StreamOutput::writeStringCollection); } public static ElasticsearchException readException(StreamInput input, int id) throws IOException { CheckedFunction elasticsearchException = ID_TO_SUPPLIER.get(id); if (elasticsearchException == null) { - if (id == 127 && input.getTransportVersion().before(TransportVersion.V_7_5_0)) { + if (id == 127 && input.getTransportVersion().before(TransportVersions.V_7_5_0)) { // was SearchContextException return new SearchException(input); } @@ -1729,7 +1729,7 @@ private enum ElasticsearchExceptionHandle { org.elasticsearch.cluster.coordination.CoordinationStateRejectedException.class, org.elasticsearch.cluster.coordination.CoordinationStateRejectedException::new, 150, - TransportVersion.V_7_0_0 + TransportVersions.V_7_0_0 ), SNAPSHOT_IN_PROGRESS_EXCEPTION( 
org.elasticsearch.snapshots.SnapshotInProgressException.class, @@ -1765,88 +1765,88 @@ private enum ElasticsearchExceptionHandle { org.elasticsearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException.class, org.elasticsearch.index.seqno.RetentionLeaseInvalidRetainingSeqNoException::new, 156, - TransportVersion.V_7_5_0 + TransportVersions.V_7_5_0 ), INGEST_PROCESSOR_EXCEPTION( org.elasticsearch.ingest.IngestProcessorException.class, org.elasticsearch.ingest.IngestProcessorException::new, 157, - TransportVersion.V_7_5_0 + TransportVersions.V_7_5_0 ), PEER_RECOVERY_NOT_FOUND_EXCEPTION( org.elasticsearch.indices.recovery.PeerRecoveryNotFound.class, org.elasticsearch.indices.recovery.PeerRecoveryNotFound::new, 158, - TransportVersion.V_7_9_0 + TransportVersions.V_7_9_0 ), NODE_HEALTH_CHECK_FAILURE_EXCEPTION( org.elasticsearch.cluster.coordination.NodeHealthCheckFailureException.class, org.elasticsearch.cluster.coordination.NodeHealthCheckFailureException::new, 159, - TransportVersion.V_8_0_0 + TransportVersions.V_8_0_0 ), NO_SEED_NODE_LEFT_EXCEPTION( org.elasticsearch.transport.NoSeedNodeLeftException.class, org.elasticsearch.transport.NoSeedNodeLeftException::new, 160, - TransportVersion.V_7_10_0 + TransportVersions.V_7_10_0 ), VERSION_MISMATCH_EXCEPTION( org.elasticsearch.action.search.VersionMismatchException.class, org.elasticsearch.action.search.VersionMismatchException::new, 161, - TransportVersion.V_7_12_0 + TransportVersions.V_7_12_0 ), AUTHENTICATION_PROCESSING_ERROR( org.elasticsearch.ElasticsearchAuthenticationProcessingError.class, org.elasticsearch.ElasticsearchAuthenticationProcessingError::new, 162, - TransportVersion.V_7_16_0 + TransportVersions.V_7_16_0 ), REPOSITORY_CONFLICT_EXCEPTION( org.elasticsearch.repositories.RepositoryConflictException.class, org.elasticsearch.repositories.RepositoryConflictException::new, 163, - TransportVersion.V_8_0_0 + TransportVersions.V_8_0_0 ), DESIRED_NODES_VERSION_CONFLICT_EXCEPTION( org.elasticsearch.cluster.desirednodes.VersionConflictException.class, org.elasticsearch.cluster.desirednodes.VersionConflictException::new, 164, - TransportVersion.V_8_1_0 + TransportVersions.V_8_1_0 ), SNAPSHOT_NAME_ALREADY_IN_USE_EXCEPTION( org.elasticsearch.snapshots.SnapshotNameAlreadyInUseException.class, org.elasticsearch.snapshots.SnapshotNameAlreadyInUseException::new, 165, - TransportVersion.V_8_2_0 + TransportVersions.V_8_2_0 ), HEALTH_NODE_NOT_DISCOVERED_EXCEPTION( HealthNodeNotDiscoveredException.class, HealthNodeNotDiscoveredException::new, 166, - TransportVersion.V_8_5_0 + TransportVersions.V_8_5_0 ), UNSUPPORTED_AGGREGATION_ON_DOWNSAMPLED_INDEX_EXCEPTION( UnsupportedAggregationOnDownsampledIndex.class, UnsupportedAggregationOnDownsampledIndex::new, 167, - TransportVersion.V_8_5_0 + TransportVersions.V_8_5_0 ), - DOCUMENT_PARSING_EXCEPTION(DocumentParsingException.class, DocumentParsingException::new, 168, TransportVersion.V_8_8_0), + DOCUMENT_PARSING_EXCEPTION(DocumentParsingException.class, DocumentParsingException::new, 168, TransportVersions.V_8_8_0), HTTP_HEADERS_VALIDATION_EXCEPTION( org.elasticsearch.http.HttpHeadersValidationException.class, org.elasticsearch.http.HttpHeadersValidationException::new, 169, - TransportVersion.V_8_500_010 + TransportVersions.V_8_500_020 ), ROLE_RESTRICTION_EXCEPTION( ElasticsearchRoleRestrictionException.class, ElasticsearchRoleRestrictionException::new, 170, - TransportVersion.V_8_500_016 + TransportVersions.V_8_500_020 ), - API_NOT_AVAILABLE_EXCEPTION(ApiNotAvailableException.class, 
ApiNotAvailableException::new, 171, TransportVersion.V_8_500_065); + API_NOT_AVAILABLE_EXCEPTION(ApiNotAvailableException.class, ApiNotAvailableException::new, 171, TransportVersions.V_8_500_065); final Class exceptionClass; final CheckedFunction constructor; diff --git a/server/src/main/java/org/elasticsearch/TransportVersion.java b/server/src/main/java/org/elasticsearch/TransportVersion.java index f7f6ed6d40be5..efb3cca9a7c59 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersion.java +++ b/server/src/main/java/org/elasticsearch/TransportVersion.java @@ -8,22 +8,12 @@ package org.elasticsearch; -import org.elasticsearch.common.Strings; import org.elasticsearch.common.VersionId; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.core.Assertions; import org.elasticsearch.internal.VersionExtension; import java.io.IOException; -import java.lang.reflect.Field; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.Map; -import java.util.NavigableMap; -import java.util.Set; -import java.util.TreeMap; /** * Represents the version of the wire protocol used to communicate between a pair of ES nodes. @@ -41,13 +31,13 @@ * those two merged commits. * *
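The paragraph above describes the id scheme; the reason it exists is the gating idiom visible throughout this diff (for example in `Build.writeBuild` and the `ElasticsearchException` handles): serialization code compares the stream's transport version against a named constant before reading or writing newer fields. A minimal sketch of that idiom, using the real `Writeable`/`StreamOutput` APIs but a hypothetical class and fields:

```java
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

// Hypothetical example type; only the version-gating idiom is taken from this diff.
public class ExampleWriteable implements Writeable {
    private final String original = "always on the wire";
    private final String addedLater = "only for newer nodes";

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(original);
        // Older nodes do not know about the extra field, so write it only when the
        // connection speaks a new enough wire protocol. V_8_500_065 is the highest
        // version defined in this diff, chosen here purely for illustration.
        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_065)) {
            out.writeString(addedLater);
        }
    }
}
```

The matching reader branches on `in.getTransportVersion()` the same way, as `Build.readBuild` does elsewhere in this diff.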

* <h2>Version compatibility</h2>

- * The earliest compatible version is hardcoded in the {@link #MINIMUM_COMPATIBLE} field. Previously, this was dynamically calculated from - * the major/minor versions of {@link Version}, but {@code TransportVersion} does not have separate major/minor version numbers. So the - * minimum compatible version is hard-coded as the transport version used by the highest minor release of the previous major version. {@link - * #MINIMUM_COMPATIBLE} should be updated appropriately whenever a major release happens. + * The earliest compatible version is hardcoded in the {@link TransportVersions#MINIMUM_COMPATIBLE} field. Previously, this was dynamically + * calculated from the major/minor versions of {@link Version}, but {@code TransportVersion} does not have separate major/minor version + * numbers. So the minimum compatible version is hard-coded as the transport version used by the highest minor release of the previous + * major version. {@link TransportVersions#MINIMUM_COMPATIBLE} should be updated appropriately whenever a major release happens. *

- * The earliest CCS compatible version is hardcoded at {@link #MINIMUM_CCS_VERSION}, as the transport version used by the previous minor - * release. This should be updated appropriately whenever a minor release happens. + * The earliest CCS compatible version is hardcoded at {@link TransportVersions#MINIMUM_CCS_VERSION}, as the transport version used by the + * previous minor release. This should be updated appropriately whenever a minor release happens. * *
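Both floors reduce to a single `onOrAfter` comparison; `TransportVersion.isCompatible` further down in this diff is implemented exactly that way. A small illustrative sketch (the helper names are hypothetical; the constants are the ones this diff moves into `TransportVersions`):

```java
import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions;

// Hypothetical helpers mirroring TransportVersion.isCompatible in this diff.
public class CompatibilityFloors {
    // any transport connection requires at least the last minor of the previous major
    static boolean canConnect(TransportVersion remote) {
        return remote.onOrAfter(TransportVersions.MINIMUM_COMPATIBLE); // V_7_17_0 in this diff
    }

    // cross-cluster search has a stricter floor: the previous minor release
    static boolean canCrossClusterSearch(TransportVersion remote) {
        return remote.onOrAfter(TransportVersions.MINIMUM_CCS_VERSION); // V_8_500_061 in this diff
    }

    public static void main(String[] args) {
        TransportVersion remote = TransportVersions.V_8_500_020;
        System.out.println(canConnect(remote));            // true: after V_7_17_0
        System.out.println(canCrossClusterSearch(remote)); // false: before V_8_500_061
    }
}
```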

* <h2>Scope of usefulness of {@link TransportVersion}</h2>

* {@link TransportVersion} is a property of the transport connection between a pair of nodes, and should not be used as an indication of @@ -59,232 +49,12 @@ */ public record TransportVersion(int id) implements VersionId { - /* - * NOTE: IntelliJ lies! - * This map is used during class construction, referenced by the registerTransportVersion method. - * When all the transport version constants have been registered, the map is cleared & never touched again. - */ - @SuppressWarnings("UnusedAssignment") - private static Map IDS = new HashMap<>(); - - private static TransportVersion registerTransportVersion(int id, String uniqueId) { - if (IDS == null) throw new IllegalStateException("The IDS map needs to be present to call this method"); - - Strings.requireNonEmpty(uniqueId, "Each TransportVersion needs a unique string id"); - Integer existing = IDS.put(uniqueId, id); - if (existing != null) { - throw new IllegalArgumentException("Versions " + id + " and " + existing + " have the same unique id"); - } - return new TransportVersion(id); - } - - public static final TransportVersion ZERO = registerTransportVersion(0, "00000000-0000-0000-0000-000000000000"); - public static final TransportVersion V_7_0_0 = registerTransportVersion(7_00_00_99, "7505fd05-d982-43ce-a63f-ff4c6c8bdeec"); - public static final TransportVersion V_7_0_1 = registerTransportVersion(7_00_01_99, "ae772780-e6f9-46a1-b0a0-20ed0cae37f7"); - public static final TransportVersion V_7_1_0 = registerTransportVersion(7_01_00_99, "fd09007c-1c54-450a-af99-9f941e1a53c2"); - public static final TransportVersion V_7_2_0 = registerTransportVersion(7_02_00_99, "b74dbc52-e727-472c-af21-2156482e8796"); - public static final TransportVersion V_7_2_1 = registerTransportVersion(7_02_01_99, "a3217b94-f436-4aab-a020-162c83ba18f2"); - public static final TransportVersion V_7_3_0 = registerTransportVersion(7_03_00_99, "4f04e4c9-c5aa-49e4-8b99-abeb4e284a5a"); - public static final TransportVersion V_7_3_2 = registerTransportVersion(7_03_02_99, "60da3953-8415-4d4f-a18d-853c3e68ebd6"); - public static final TransportVersion V_7_4_0 = registerTransportVersion(7_04_00_99, "ec7e58aa-55b4-4064-a9dd-fd723a2ba7a8"); - public static final TransportVersion V_7_5_0 = registerTransportVersion(7_05_00_99, "cc6e14dc-9dc7-4b74-8e15-1f99a6cfbe03"); - public static final TransportVersion V_7_6_0 = registerTransportVersion(7_06_00_99, "4637b8ae-f3df-43ae-a065-ad4c29f3373a"); - public static final TransportVersion V_7_7_0 = registerTransportVersion(7_07_00_99, "7bb73c48-ddb8-4437-b184-30371c35dd4b"); - public static final TransportVersion V_7_8_0 = registerTransportVersion(7_08_00_99, "c3cc74af-d15e-494b-a907-6ad6dd2f4660"); - public static final TransportVersion V_7_8_1 = registerTransportVersion(7_08_01_99, "7acb9f6e-32f2-45ce-b87d-ca1f165b8e7a"); - public static final TransportVersion V_7_9_0 = registerTransportVersion(7_09_00_99, "9388fe76-192a-4053-b51c-d2a7b8eae545"); - public static final TransportVersion V_7_10_0 = registerTransportVersion(7_10_00_99, "4efca195-38e4-4f74-b877-c26fb2a40733"); - public static final TransportVersion V_7_10_1 = registerTransportVersion(7_10_01_99, "0070260c-aa0b-4fc2-9c87-5cd5f23b005f"); - public static final TransportVersion V_7_11_0 = registerTransportVersion(7_11_00_99, "3b43bcbc-1c5e-4cc2-a3b4-8ac8b64239e8"); - public static final TransportVersion V_7_12_0 = registerTransportVersion(7_12_00_99, "3be9ff6f-2d9f-4fc2-ba91-394dd5ebcf33"); - public static final TransportVersion V_7_13_0 = registerTransportVersion(7_13_00_99, 
"e1fe494a-7c66-4571-8f8f-1d7e6d8df1b3"); - public static final TransportVersion V_7_14_0 = registerTransportVersion(7_14_00_99, "8cf0954c-b085-467f-b20b-3cb4b2e69e3e"); - public static final TransportVersion V_7_15_0 = registerTransportVersion(7_15_00_99, "2273ac0e-00bb-4024-9e2e-ab78981623c6"); - public static final TransportVersion V_7_15_1 = registerTransportVersion(7_15_01_99, "a8c3503d-3452-45cf-b385-e855e16547fe"); - public static final TransportVersion V_7_16_0 = registerTransportVersion(7_16_00_99, "59abadd2-25db-4547-a991-c92306a3934e"); - public static final TransportVersion V_7_17_0 = registerTransportVersion(7_17_00_99, "322efe93-4c73-4e15-9274-bb76836c8fa8"); - public static final TransportVersion V_7_17_1 = registerTransportVersion(7_17_01_99, "51c72842-7974-4669-ad25-bf13ba307307"); - public static final TransportVersion V_7_17_8 = registerTransportVersion(7_17_08_99, "82a3e70d-cf0e-4efb-ad16-6077ab9fe19f"); - public static final TransportVersion V_8_0_0 = registerTransportVersion(8_00_00_99, "c7d2372c-9f01-4a79-8b11-227d862dfe4f"); - public static final TransportVersion V_8_1_0 = registerTransportVersion(8_01_00_99, "3dc49dce-9cef-492a-ac8d-3cc79f6b4280"); - public static final TransportVersion V_8_2_0 = registerTransportVersion(8_02_00_99, "8ce6d555-202e-47db-ab7d-ade9dda1b7e8"); - public static final TransportVersion V_8_3_0 = registerTransportVersion(8_03_00_99, "559ddb66-d857-4208-bed5-a995ccf478ea"); - public static final TransportVersion V_8_4_0 = registerTransportVersion(8_04_00_99, "c0d12906-aa5b-45d4-94c7-cbcf4d9818ca"); - public static final TransportVersion V_8_5_0 = registerTransportVersion(8_05_00_99, "be3d7f23-7240-4904-9d7f-e25a0f766eca"); - public static final TransportVersion V_8_6_0 = registerTransportVersion(8_06_00_99, "e209c5ed-3488-4415-b561-33492ca3b789"); - public static final TransportVersion V_8_6_1 = registerTransportVersion(8_06_01_99, "9f113acb-1b21-4fda-bef9-2a3e669b5c7b"); - public static final TransportVersion V_8_7_0 = registerTransportVersion(8_07_00_99, "f1ee7a85-4fa6-43f5-8679-33e2b750448b"); - public static final TransportVersion V_8_7_1 = registerTransportVersion(8_07_01_99, "018de9d8-9e8b-4ac7-8f4b-3a6fbd0487fb"); - public static final TransportVersion V_8_8_0 = registerTransportVersion(8_08_00_99, "f64fe576-0767-4ec3-984e-3e30b33b6c46"); - public static final TransportVersion V_8_8_1 = registerTransportVersion(8_08_01_99, "291c71bb-5b0a-4b7e-a407-6e53bc128d0f"); - - /* - * READ THE COMMENT BELOW THiS BLOCK OF DECLARATIONS BEFORE ADDING NEW TRANSPORT VERSIONS - * Detached transport versions added below here. 
- */ - public static final TransportVersion V_8_500_010 = registerTransportVersion(8_500_010, "9818C628-1EEC-439B-B943-468F61460675"); - public static final TransportVersion V_8_500_011 = registerTransportVersion(8_500_011, "2209F28D-B52E-4BC4-9889-E780F291C32E"); - public static final TransportVersion V_8_500_012 = registerTransportVersion(8_500_012, "BB6F4AF1-A860-4FD4-A138-8150FFBE0ABD"); - public static final TransportVersion V_8_500_013 = registerTransportVersion(8_500_013, "f65b85ac-db5e-4558-a487-a1dde4f6a33a"); - public static final TransportVersion V_8_500_014 = registerTransportVersion(8_500_014, "D115A2E1-1739-4A02-AB7B-64F6EA157EFB"); - public static final TransportVersion V_8_500_015 = registerTransportVersion(8_500_015, "651216c9-d54f-4189-9fe1-48d82d276863"); - public static final TransportVersion V_8_500_016 = registerTransportVersion(8_500_016, "492C94FB-AAEA-4C9E-8375-BDB67A398584"); - public static final TransportVersion V_8_500_017 = registerTransportVersion(8_500_017, "0EDCB5BA-049C-443C-8AB1-5FA58FB996FB"); - public static final TransportVersion V_8_500_018 = registerTransportVersion(8_500_018, "827C32CE-33D9-4AC3-A773-8FB768F59EAF"); - public static final TransportVersion V_8_500_019 = registerTransportVersion(8_500_019, "09bae57f-cab8-423c-aab3-c9778509ffe3"); - public static final TransportVersion V_8_500_020 = registerTransportVersion(8_500_020, "ECB42C26-B258-42E5-A835-E31AF84A76DE"); - public static final TransportVersion V_8_500_021 = registerTransportVersion(8_500_021, "102e0d84-0c08-402c-a696-935f3a3da873"); - public static final TransportVersion V_8_500_022 = registerTransportVersion(8_500_022, "4993c724-7a81-4955-84e7-403484610091"); - public static final TransportVersion V_8_500_023 = registerTransportVersion(8_500_023, "01b06435-5d73-42ff-a121-3b36b771375e"); - public static final TransportVersion V_8_500_024 = registerTransportVersion(8_500_024, "db337007-f823-4dbd-968e-375383814c17"); - public static final TransportVersion V_8_500_025 = registerTransportVersion(8_500_025, "b2ab7b75-5ac2-4a3b-bbb6-8789ca66722d"); - public static final TransportVersion V_8_500_026 = registerTransportVersion(8_500_026, "965d294b-14aa-4abb-bcfc-34631187941d"); - public static final TransportVersion V_8_500_027 = registerTransportVersion(8_500_027, "B151D967-8E7C-401C-8275-0ABC06335F2D"); - public static final TransportVersion V_8_500_028 = registerTransportVersion(8_500_028, "a6592d08-15cb-4e1a-b9b4-b2ba24058444"); - public static final TransportVersion V_8_500_029 = registerTransportVersion(8_500_029, "f3bd98af-6187-e161-e315-718a2fecc2db"); - public static final TransportVersion V_8_500_030 = registerTransportVersion(8_500_030, "b72d7f12-8ed3-4a5b-8e6a-4910ea10e0d7"); - public static final TransportVersion V_8_500_031 = registerTransportVersion(8_500_031, "e7aa7e95-37e7-46a3-aad1-90a21c0769e7"); - public static final TransportVersion V_8_500_032 = registerTransportVersion(8_500_032, "a9a14bc6-c3f2-41d9-a3d8-c686bf2c901d"); - public static final TransportVersion V_8_500_033 = registerTransportVersion(8_500_033, "193ab7c4-a751-4cbd-a66a-2d7d56ccbc10"); - public static final TransportVersion V_8_500_034 = registerTransportVersion(8_500_034, "16871c8b-88ba-4432-980a-10fd9ecad2dc"); - public static final TransportVersion V_8_500_035 = registerTransportVersion(8_500_035, "664dd6ce-3487-4fbd-81a9-af778b28be45"); - public static final TransportVersion V_8_500_036 = registerTransportVersion(8_500_036, "3343c64f-d7ac-4f02-9262-3e1acfc56f89"); - public static final TransportVersion 
V_8_500_037 = registerTransportVersion(8_500_037, "d76a4f22-8878-43e0-acfa-15e452195fa7"); - public static final TransportVersion V_8_500_038 = registerTransportVersion(8_500_038, "9ef93580-feae-409f-9989-b49e411ca7a9"); - public static final TransportVersion V_8_500_039 = registerTransportVersion(8_500_039, "c23722d7-6139-4cf2-b8a1-600fbd4ec359"); - public static final TransportVersion V_8_500_040 = registerTransportVersion(8_500_040, "8F3AA068-A608-4A16-9683-2412A75BF2DD"); - public static final TransportVersion V_8_500_041 = registerTransportVersion(8_500_041, "5b6a0fd0-ac0b-443f-baae-cffec140905c"); - public static final TransportVersion V_8_500_042 = registerTransportVersion(8_500_042, "763b4801-a4fc-47c4-aff5-7f5a757b8a07"); - public static final TransportVersion V_8_500_043 = registerTransportVersion(8_500_043, "50babd14-7f5c-4f8c-9351-94e0d397aabc"); - public static final TransportVersion V_8_500_044 = registerTransportVersion(8_500_044, "96b83320-2317-4e9d-b735-356f18c1d76a"); - public static final TransportVersion V_8_500_045 = registerTransportVersion(8_500_045, "24a596dd-c843-4c0a-90b3-759697d74026"); - public static final TransportVersion V_8_500_046 = registerTransportVersion(8_500_046, "61666d4c-a4f0-40db-8a3d-4806718247c5"); - public static final TransportVersion V_8_500_047 = registerTransportVersion(8_500_047, "4b1682fe-c37e-4184-80f6-7d57fcba9b3d"); - public static final TransportVersion V_8_500_048 = registerTransportVersion(8_500_048, "f9658aa5-f066-4edb-bcb9-40bf256c9294"); - public static final TransportVersion V_8_500_049 = registerTransportVersion(8_500_049, "828bb6ce-2fbb-11ee-be56-0242ac120002"); - public static final TransportVersion V_8_500_050 = registerTransportVersion(8_500_050, "69722fa2-7c0a-4227-86fb-6d6a9a0a0321"); - public static final TransportVersion V_8_500_051 = registerTransportVersion(8_500_051, "a28b43bc-bb5f-4406-afcf-26900aa98a71"); - public static final TransportVersion V_8_500_052 = registerTransportVersion(8_500_052, "2d382b3d-9838-4cce-84c8-4142113e5c2b"); - public static final TransportVersion V_8_500_053 = registerTransportVersion(8_500_053, "aa603bae-01e2-380a-8950-6604468e8c6d"); - public static final TransportVersion V_8_500_054 = registerTransportVersion(8_500_054, "b76ef950-af03-4dda-85c2-6400ec442e7e"); - public static final TransportVersion V_8_500_055 = registerTransportVersion(8_500_055, "7831c609-0df1-42d6-aa97-8a346c389ef"); - public static final TransportVersion V_8_500_056 = registerTransportVersion(8_500_056, "afa8c4be-29c9-48ab-b1ed-7182415c1b71"); - public static final TransportVersion V_8_500_057 = registerTransportVersion(8_500_057, "80c088c6-358d-43b2-8d9c-1ea3c6c2b9fd"); - public static final TransportVersion V_8_500_058 = registerTransportVersion(8_500_058, "41d9c98a-1de2-4dc1-86f1-abd4cc1bef57"); - public static final TransportVersion V_8_500_059 = registerTransportVersion(8_500_059, "2f2090c0-7cd0-4a10-8f02-63d26073604f"); - public static final TransportVersion V_8_500_060 = registerTransportVersion(8_500_060, "ec065a44-b468-4f8a-aded-7b90ca8d792b"); - // 8.10.0 release version is: - public static final TransportVersion V_8_500_061 = registerTransportVersion(8_500_061, "4e07f830-8be4-448c-851e-62b3d2f0bf0a"); - public static final TransportVersion V_8_500_062 = registerTransportVersion(8_500_062, "09CD9C9B-3207-4B40-8756-B7A12001A885"); - public static final TransportVersion V_8_500_063 = registerTransportVersion(8_500_063, "31dedced-0055-4f34-b952-2f6919be7488"); - public static final TransportVersion V_8_500_064 = 
registerTransportVersion(8_500_064, "3a795175-5e6f-40ff-90fe-5571ea8ab04e"); - public static final TransportVersion V_8_500_065 = registerTransportVersion(8_500_065, "4e253c58-1b3d-11ee-be56-0242ac120002"); - - /* - * STOP! READ THIS FIRST! No, really, - * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ - * / ___|_ _/ _ \| _ \| | | _ \| ____| / \ | _ \ |_ _| | | |_ _/ ___| | ___|_ _| _ \/ ___|_ _| | - * \___ \ | || | | | |_) | | | |_) | _| / _ \ | | | | | | | |_| || |\___ \ | |_ | || |_) \___ \ | | | | - * ___) || || |_| | __/|_| | _ <| |___ / ___ \| |_| | | | | _ || | ___) | | _| | || _ < ___) || | |_| - * |____/ |_| \___/|_| (_) |_| \_\_____/_/ \_\____/ |_| |_| |_|___|____/ |_| |___|_| \_\____/ |_| (_) - * - * A new transport version should be added EVERY TIME a change is made to the serialization protocol of one or more classes. Each - * transport version should only be used in a single merged commit (apart from the BwC versions copied from o.e.Version, ≤V_8_8_1). - * - * To add a new transport version, add a new constant at the bottom of the list, above this comment, which is one greater than the - * current highest version, ensure it has a fresh UUID, and update CurrentHolder#CURRENT to point to the new version. Don't add other - * lines, comments, etc. - * - * REVERTING A TRANSPORT VERSION - * - * If you revert a commit with a transport version change, you MUST ensure there is a NEW transport version representing the reverted - * change. DO NOT let the transport version go backwards, it must ALWAYS be incremented. - */ - - private static class CurrentHolder { - private static final TransportVersion CURRENT = findCurrent(V_8_500_065); - - // finds the pluggable current version, or uses the given fallback - private static TransportVersion findCurrent(TransportVersion fallback) { - var versionExtension = VersionExtension.load(); - if (versionExtension == null) { - return fallback; - } - var version = versionExtension.getCurrentTransportVersion(); - assert version.onOrAfter(fallback); - return version; - } - } - - /** - * Reference to the earliest compatible transport version to this version of the codebase. - * This should be the transport version used by the highest minor version of the previous major. - */ - public static final TransportVersion MINIMUM_COMPATIBLE = V_7_17_0; - - /** - * Reference to the minimum transport version that can be used with CCS. - * This should be the transport version used by the previous minor release. 
-     */
-    public static final TransportVersion MINIMUM_CCS_VERSION = V_8_500_061;
-
-    static {
-        // see comment on IDS field
-        // now we're registered all the transport versions, we can clear the map
-        IDS = null;
-    }
-
-    static NavigableMap<Integer, TransportVersion> getAllVersionIds(Class<?> cls) {
-        Map<Integer, String> versionIdFields = new HashMap<>();
-        NavigableMap<Integer, TransportVersion> builder = new TreeMap<>();
-
-        Set<String> ignore = Set.of("ZERO", "CURRENT", "MINIMUM_COMPATIBLE", "MINIMUM_CCS_VERSION");
-
-        for (Field declaredField : cls.getFields()) {
-            if (declaredField.getType().equals(TransportVersion.class)) {
-                String fieldName = declaredField.getName();
-                if (ignore.contains(fieldName)) {
-                    continue;
-                }
-
-                TransportVersion version;
-                try {
-                    version = (TransportVersion) declaredField.get(null);
-                } catch (IllegalAccessException e) {
-                    throw new AssertionError(e);
-                }
-                builder.put(version.id, version);
-
-                if (Assertions.ENABLED) {
-                    // check the version number is unique
-                    var sameVersionNumber = versionIdFields.put(version.id, fieldName);
-                    assert sameVersionNumber == null
-                        : "Versions ["
-                            + sameVersionNumber
-                            + "] and ["
-                            + fieldName
-                            + "] have the same version number ["
-                            + version.id
-                            + "]. Each TransportVersion should have a different version number";
-                }
-            }
-        }
-
-        return Collections.unmodifiableNavigableMap(builder);
-    }
-
-    private static final NavigableMap<Integer, TransportVersion> VERSION_IDS = getAllVersionIds(TransportVersion.class);
-
-    static Collection<TransportVersion> getAllVersions() {
-        return VERSION_IDS.values();
-    }

     public static TransportVersion readVersion(StreamInput in) throws IOException {
         return fromId(in.readVInt());
     }

     public static TransportVersion fromId(int id) {
-        TransportVersion known = VERSION_IDS.get(id);
+        TransportVersion known = TransportVersions.VERSION_IDS.get(id);
         if (known != null) {
             return known;
         }
@@ -314,7 +84,7 @@ public static TransportVersion max(TransportVersion version1, TransportVersion v
      * Returns {@code true} if the specified version is compatible with this running version of Elasticsearch.
      */
     public static boolean isCompatible(TransportVersion version) {
-        return version.onOrAfter(MINIMUM_COMPATIBLE);
+        return version.onOrAfter(TransportVersions.MINIMUM_COMPATIBLE);
     }
@@ -333,4 +103,19 @@ public static TransportVersion fromString(String str) {
     public String toString() {
         return Integer.toString(id);
     }
+
+    private static class CurrentHolder {
+        private static final TransportVersion CURRENT = findCurrent();
+
+        // finds the pluggable current version, or falls back to the latest defined version
+        private static TransportVersion findCurrent() {
+            var versionExtension = VersionExtension.load();
+            if (versionExtension == null) {
+                return TransportVersions.LATEST_DEFINED;
+            }
+            var version = versionExtension.getCurrentTransportVersion();
+            assert version.onOrAfter(TransportVersions.LATEST_DEFINED);
+            return version;
+        }
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java
new file mode 100644
index 0000000000000..a5b456236a94d
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/TransportVersions.java
@@ -0,0 +1,242 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch;
+
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.core.Assertions;
+
+import java.lang.reflect.Field;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.NavigableMap;
+import java.util.Set;
+import java.util.TreeMap;
+
+public class TransportVersions {
+
+    /*
+     * NOTE: IntelliJ lies!
+     * This map is used during class construction, referenced by the def method.
+     * When all the transport version constants have been registered, the map is cleared & never touched again.
+     */
+    static Map<String, Integer> IDS = new HashMap<>();
+
+    static TransportVersion def(int id, String uniqueId) {
+        if (IDS == null) throw new IllegalStateException("The IDS map needs to be present to call this method");
+
+        Strings.requireNonEmpty(uniqueId, "Each TransportVersion needs a unique string id");
+        Integer existing = IDS.put(uniqueId, id);
+        if (existing != null) {
+            throw new IllegalArgumentException("Versions " + id + " and " + existing + " have the same unique id");
+        }
+        return new TransportVersion(id);
+    }
+
+    public static final TransportVersion ZERO = def(0, "00000000-0000-0000-0000-000000000000");
+    public static final TransportVersion V_7_0_0 = def(7_00_00_99, "7505fd05-d982-43ce-a63f-ff4c6c8bdeec");
+    public static final TransportVersion V_7_0_1 = def(7_00_01_99, "ae772780-e6f9-46a1-b0a0-20ed0cae37f7");
+    public static final TransportVersion V_7_1_0 = def(7_01_00_99, "fd09007c-1c54-450a-af99-9f941e1a53c2");
+    public static final TransportVersion V_7_2_0 = def(7_02_00_99, "b74dbc52-e727-472c-af21-2156482e8796");
+    public static final TransportVersion V_7_2_1 = def(7_02_01_99, "a3217b94-f436-4aab-a020-162c83ba18f2");
+    public static final TransportVersion V_7_3_0 = def(7_03_00_99, "4f04e4c9-c5aa-49e4-8b99-abeb4e284a5a");
+    public static final TransportVersion V_7_3_2 = def(7_03_02_99, "60da3953-8415-4d4f-a18d-853c3e68ebd6");
+    public static final TransportVersion V_7_4_0 = def(7_04_00_99, "ec7e58aa-55b4-4064-a9dd-fd723a2ba7a8");
+    public static final TransportVersion V_7_5_0 = def(7_05_00_99, "cc6e14dc-9dc7-4b74-8e15-1f99a6cfbe03");
+    public static final TransportVersion V_7_6_0 = def(7_06_00_99, "4637b8ae-f3df-43ae-a065-ad4c29f3373a");
+    public static final TransportVersion V_7_7_0 = def(7_07_00_99, "7bb73c48-ddb8-4437-b184-30371c35dd4b");
+    public static final TransportVersion V_7_8_0 = def(7_08_00_99, "c3cc74af-d15e-494b-a907-6ad6dd2f4660");
+    public static final TransportVersion V_7_8_1 = def(7_08_01_99, "7acb9f6e-32f2-45ce-b87d-ca1f165b8e7a");
+    public static final TransportVersion V_7_9_0 = def(7_09_00_99, "9388fe76-192a-4053-b51c-d2a7b8eae545");
+    public static final TransportVersion V_7_10_0 = def(7_10_00_99, "4efca195-38e4-4f74-b877-c26fb2a40733");
+    public static final TransportVersion V_7_10_1 = def(7_10_01_99, "0070260c-aa0b-4fc2-9c87-5cd5f23b005f");
+    public static final TransportVersion V_7_11_0 = def(7_11_00_99, "3b43bcbc-1c5e-4cc2-a3b4-8ac8b64239e8");
+    public static final TransportVersion V_7_12_0 = def(7_12_00_99, "3be9ff6f-2d9f-4fc2-ba91-394dd5ebcf33");
+    public static final TransportVersion V_7_13_0 = def(7_13_00_99, "e1fe494a-7c66-4571-8f8f-1d7e6d8df1b3");
+    public static final TransportVersion V_7_14_0 = def(7_14_00_99, "8cf0954c-b085-467f-b20b-3cb4b2e69e3e");
+    public static final TransportVersion V_7_15_0 = def(7_15_00_99, "2273ac0e-00bb-4024-9e2e-ab78981623c6");
+    public static final TransportVersion V_7_15_1 = def(7_15_01_99,
"a8c3503d-3452-45cf-b385-e855e16547fe"); + public static final TransportVersion V_7_16_0 = def(7_16_00_99, "59abadd2-25db-4547-a991-c92306a3934e"); + public static final TransportVersion V_7_17_0 = def(7_17_00_99, "322efe93-4c73-4e15-9274-bb76836c8fa8"); + public static final TransportVersion V_7_17_1 = def(7_17_01_99, "51c72842-7974-4669-ad25-bf13ba307307"); + public static final TransportVersion V_7_17_8 = def(7_17_08_99, "82a3e70d-cf0e-4efb-ad16-6077ab9fe19f"); + public static final TransportVersion V_8_0_0 = def(8_00_00_99, "c7d2372c-9f01-4a79-8b11-227d862dfe4f"); + public static final TransportVersion V_8_1_0 = def(8_01_00_99, "3dc49dce-9cef-492a-ac8d-3cc79f6b4280"); + public static final TransportVersion V_8_2_0 = def(8_02_00_99, "8ce6d555-202e-47db-ab7d-ade9dda1b7e8"); + public static final TransportVersion V_8_3_0 = def(8_03_00_99, "559ddb66-d857-4208-bed5-a995ccf478ea"); + public static final TransportVersion V_8_4_0 = def(8_04_00_99, "c0d12906-aa5b-45d4-94c7-cbcf4d9818ca"); + public static final TransportVersion V_8_5_0 = def(8_05_00_99, "be3d7f23-7240-4904-9d7f-e25a0f766eca"); + public static final TransportVersion V_8_6_0 = def(8_06_00_99, "e209c5ed-3488-4415-b561-33492ca3b789"); + public static final TransportVersion V_8_6_1 = def(8_06_01_99, "9f113acb-1b21-4fda-bef9-2a3e669b5c7b"); + public static final TransportVersion V_8_7_0 = def(8_07_00_99, "f1ee7a85-4fa6-43f5-8679-33e2b750448b"); + public static final TransportVersion V_8_7_1 = def(8_07_01_99, "018de9d8-9e8b-4ac7-8f4b-3a6fbd0487fb"); + public static final TransportVersion V_8_8_0 = def(8_08_00_99, "f64fe576-0767-4ec3-984e-3e30b33b6c46"); + public static final TransportVersion V_8_8_1 = def(8_08_01_99, "291c71bb-5b0a-4b7e-a407-6e53bc128d0f"); + /* + * READ THE COMMENT BELOW THIS BLOCK OF DECLARATIONS BEFORE ADDING NEW TRANSPORT VERSIONS + * Detached transport versions added below here. 
+ */ + public static final TransportVersion V_8_500_020 = def(8_500_020, "ECB42C26-B258-42E5-A835-E31AF84A76DE"); + public static final TransportVersion V_8_500_021 = def(8_500_021, "102e0d84-0c08-402c-a696-935f3a3da873"); + public static final TransportVersion V_8_500_022 = def(8_500_022, "4993c724-7a81-4955-84e7-403484610091"); + public static final TransportVersion V_8_500_023 = def(8_500_023, "01b06435-5d73-42ff-a121-3b36b771375e"); + public static final TransportVersion V_8_500_024 = def(8_500_024, "db337007-f823-4dbd-968e-375383814c17"); + public static final TransportVersion V_8_500_025 = def(8_500_025, "b2ab7b75-5ac2-4a3b-bbb6-8789ca66722d"); + public static final TransportVersion V_8_500_026 = def(8_500_026, "965d294b-14aa-4abb-bcfc-34631187941d"); + public static final TransportVersion V_8_500_027 = def(8_500_027, "B151D967-8E7C-401C-8275-0ABC06335F2D"); + public static final TransportVersion V_8_500_028 = def(8_500_028, "a6592d08-15cb-4e1a-b9b4-b2ba24058444"); + public static final TransportVersion V_8_500_029 = def(8_500_029, "f3bd98af-6187-e161-e315-718a2fecc2db"); + public static final TransportVersion V_8_500_030 = def(8_500_030, "b72d7f12-8ed3-4a5b-8e6a-4910ea10e0d7"); + public static final TransportVersion V_8_500_031 = def(8_500_031, "e7aa7e95-37e7-46a3-aad1-90a21c0769e7"); + public static final TransportVersion V_8_500_032 = def(8_500_032, "a9a14bc6-c3f2-41d9-a3d8-c686bf2c901d"); + public static final TransportVersion V_8_500_033 = def(8_500_033, "193ab7c4-a751-4cbd-a66a-2d7d56ccbc10"); + public static final TransportVersion V_8_500_034 = def(8_500_034, "16871c8b-88ba-4432-980a-10fd9ecad2dc"); + public static final TransportVersion V_8_500_035 = def(8_500_035, "664dd6ce-3487-4fbd-81a9-af778b28be45"); + public static final TransportVersion V_8_500_036 = def(8_500_036, "3343c64f-d7ac-4f02-9262-3e1acfc56f89"); + public static final TransportVersion V_8_500_037 = def(8_500_037, "d76a4f22-8878-43e0-acfa-15e452195fa7"); + public static final TransportVersion V_8_500_038 = def(8_500_038, "9ef93580-feae-409f-9989-b49e411ca7a9"); + public static final TransportVersion V_8_500_039 = def(8_500_039, "c23722d7-6139-4cf2-b8a1-600fbd4ec359"); + public static final TransportVersion V_8_500_040 = def(8_500_040, "8F3AA068-A608-4A16-9683-2412A75BF2DD"); + public static final TransportVersion V_8_500_041 = def(8_500_041, "5b6a0fd0-ac0b-443f-baae-cffec140905c"); + public static final TransportVersion V_8_500_042 = def(8_500_042, "763b4801-a4fc-47c4-aff5-7f5a757b8a07"); + public static final TransportVersion V_8_500_043 = def(8_500_043, "50babd14-7f5c-4f8c-9351-94e0d397aabc"); + public static final TransportVersion V_8_500_044 = def(8_500_044, "96b83320-2317-4e9d-b735-356f18c1d76a"); + public static final TransportVersion V_8_500_045 = def(8_500_045, "24a596dd-c843-4c0a-90b3-759697d74026"); + public static final TransportVersion V_8_500_046 = def(8_500_046, "61666d4c-a4f0-40db-8a3d-4806718247c5"); + public static final TransportVersion V_8_500_047 = def(8_500_047, "4b1682fe-c37e-4184-80f6-7d57fcba9b3d"); + public static final TransportVersion V_8_500_048 = def(8_500_048, "f9658aa5-f066-4edb-bcb9-40bf256c9294"); + public static final TransportVersion V_8_500_049 = def(8_500_049, "828bb6ce-2fbb-11ee-be56-0242ac120002"); + public static final TransportVersion V_8_500_050 = def(8_500_050, "69722fa2-7c0a-4227-86fb-6d6a9a0a0321"); + public static final TransportVersion V_8_500_051 = def(8_500_051, "a28b43bc-bb5f-4406-afcf-26900aa98a71"); + public static final TransportVersion V_8_500_052 = 
def(8_500_052, "2d382b3d-9838-4cce-84c8-4142113e5c2b"); + public static final TransportVersion V_8_500_053 = def(8_500_053, "aa603bae-01e2-380a-8950-6604468e8c6d"); + public static final TransportVersion V_8_500_054 = def(8_500_054, "b76ef950-af03-4dda-85c2-6400ec442e7e"); + public static final TransportVersion V_8_500_055 = def(8_500_055, "7831c609-0df1-42d6-aa97-8a346c389ef"); + public static final TransportVersion V_8_500_056 = def(8_500_056, "afa8c4be-29c9-48ab-b1ed-7182415c1b71"); + public static final TransportVersion V_8_500_057 = def(8_500_057, "80c088c6-358d-43b2-8d9c-1ea3c6c2b9fd"); + public static final TransportVersion V_8_500_058 = def(8_500_058, "41d9c98a-1de2-4dc1-86f1-abd4cc1bef57"); + public static final TransportVersion V_8_500_059 = def(8_500_059, "2f2090c0-7cd0-4a10-8f02-63d26073604f"); + public static final TransportVersion V_8_500_060 = def(8_500_060, "ec065a44-b468-4f8a-aded-7b90ca8d792b"); + public static final TransportVersion V_8_500_061 = def(8_500_061, "4e07f830-8be4-448c-851e-62b3d2f0bf0a"); + public static final TransportVersion V_8_500_062 = def(8_500_062, "09CD9C9B-3207-4B40-8756-B7A12001A885"); + public static final TransportVersion V_8_500_063 = def(8_500_063, "31dedced-0055-4f34-b952-2f6919be7488"); + public static final TransportVersion V_8_500_064 = def(8_500_064, "3a795175-5e6f-40ff-90fe-5571ea8ab04e"); + public static final TransportVersion V_8_500_065 = def(8_500_065, "4e253c58-1b3d-11ee-be56-0242ac120002"); + public static final TransportVersion V_8_500_066 = def(8_500_066, "F398ECC6-5D2A-4BD8-A9E8-1101F030DF85"); + public static final TransportVersion V_8_500_067 = def(8_500_067, "a7c86604-a917-4aff-9a1b-a4d44c3dbe02"); + public static final TransportVersion V_8_500_068 = def(8_500_068, "2683c8b4-5372-4a6a-bb3a-d61aa679089a"); + public static final TransportVersion V_8_500_069 = def(8_500_069, "5b804027-d8a0-421b-9970-1f53d766854b"); + public static final TransportVersion V_8_500_070 = def(8_500_070, "6BADC9CD-3C9D-4381-8BD9-B305CAA93F86"); + public static final TransportVersion V_8_500_071 = def(8_500_071, "a86dfc08-3026-4f01-90ef-6d6de003e217"); + /* + * STOP! READ THIS FIRST! No, really, + * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ + * / ___|_ _/ _ \| _ \| | | _ \| ____| / \ | _ \ |_ _| | | |_ _/ ___| | ___|_ _| _ \/ ___|_ _| | + * \___ \ | || | | | |_) | | | |_) | _| / _ \ | | | | | | | |_| || |\___ \ | |_ | || |_) \___ \ | | | | + * ___) || || |_| | __/|_| | _ <| |___ / ___ \| |_| | | | | _ || | ___) | | _| | || _ < ___) || | |_| + * |____/ |_| \___/|_| (_) |_| \_\_____/_/ \_\____/ |_| |_| |_|___|____/ |_| |___|_| \_\____/ |_| (_) + * + * A new transport version should be added EVERY TIME a change is made to the serialization protocol of one or more classes. Each + * transport version should only be used in a single merged commit (apart from the BwC versions copied from o.e.Version, ≤V_8_8_1). + * + * To add a new transport version, add a new constant at the bottom of the list, above this comment, which is one greater than the + * current highest version and ensure it has a fresh UUID. Don't add other lines, comments, etc. + * + * REVERTING A TRANSPORT VERSION + * + * If you revert a commit with a transport version change, you MUST ensure there is a NEW transport version representing the reverted + * change. DO NOT let the transport version go backwards, it must ALWAYS be incremented. 
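+     *
+     * As a concrete sketch (the constant name, id, and UUID below are illustrative placeholders, not real
+     * registered versions): a serialization change merged after V_8_500_071 would add, at the bottom of the
+     * list above,
+     *
+     *     public static final TransportVersion V_8_500_072 = def(8_500_072, "<freshly-generated UUID>");
+     *
+     * and the corresponding stream read/write code would then guard the new wire format with something like
+     *
+     *     if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_072)) { ... }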
+     *
+     * DETERMINING TRANSPORT VERSIONS FROM GIT HISTORY
+     *
+     * If your git checkout has the expected minor-version-numbered branches and the expected release-version tags then you can find the
+     * transport versions known by a particular release ...
+     *
+     *     git show v8.9.1:server/src/main/java/org/elasticsearch/TransportVersions.java | grep def
+     *
+     * ... or by a particular branch ...
+     *
+     *     git show 8.10:server/src/main/java/org/elasticsearch/TransportVersions.java | grep def
+     *
+     * ... and you can see which versions were added in between two versions too ...
+     *
+     *     git diff 8.10..main -- server/src/main/java/org/elasticsearch/TransportVersions.java
+     */
+
+    /**
+     * Reference to the earliest compatible transport version to this version of the codebase.
+     * This should be the transport version used by the highest minor version of the previous major.
+     */
+    public static final TransportVersion MINIMUM_COMPATIBLE = V_7_17_0;
+
+    /**
+     * Reference to the minimum transport version that can be used with CCS.
+     * This should be the transport version used by the previous minor release.
+     */
+    public static final TransportVersion MINIMUM_CCS_VERSION = V_8_500_061;
+
+    static final NavigableMap<Integer, TransportVersion> VERSION_IDS = getAllVersionIds(TransportVersions.class);
+
+    // the highest transport version constant defined in this file, used as a fallback for TransportVersion.current()
+    static final TransportVersion LATEST_DEFINED;
+    static {
+        LATEST_DEFINED = VERSION_IDS.lastEntry().getValue();
+
+        // see comment on IDS field
+        // now we've registered all the transport versions, we can clear the map
+        IDS = null;
+    }
+
+    static NavigableMap<Integer, TransportVersion> getAllVersionIds(Class<?> cls) {
+        Map<Integer, String> versionIdFields = new HashMap<>();
+        NavigableMap<Integer, TransportVersion> builder = new TreeMap<>();
+
+        Set<String> ignore = Set.of("ZERO", "CURRENT", "MINIMUM_COMPATIBLE", "MINIMUM_CCS_VERSION");
+
+        for (Field declaredField : cls.getFields()) {
+            if (declaredField.getType().equals(TransportVersion.class)) {
+                String fieldName = declaredField.getName();
+                if (ignore.contains(fieldName)) {
+                    continue;
+                }
+
+                TransportVersion version;
+                try {
+                    version = (TransportVersion) declaredField.get(null);
+                } catch (IllegalAccessException e) {
+                    throw new AssertionError(e);
+                }
+                builder.put(version.id(), version);
+
+                if (Assertions.ENABLED) {
+                    // check the version number is unique
+                    var sameVersionNumber = versionIdFields.put(version.id(), fieldName);
+                    assert sameVersionNumber == null
+                        : "Versions ["
+                            + sameVersionNumber
+                            + "] and ["
+                            + fieldName
+                            + "] have the same version number ["
+                            + version.id()
+                            + "]. 
Each TransportVersion should have a different version number"; + } + } + } + + return Collections.unmodifiableNavigableMap(builder); + } + + static Collection getAllVersions() { + return VERSION_IDS.values(); + } + + // no instance + private TransportVersions() {} +} diff --git a/server/src/main/java/org/elasticsearch/Version.java b/server/src/main/java/org/elasticsearch/Version.java index fef6186608110..37b382db2f2ca 100644 --- a/server/src/main/java/org/elasticsearch/Version.java +++ b/server/src/main/java/org/elasticsearch/Version.java @@ -15,7 +15,6 @@ import org.elasticsearch.core.Assertions; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.core.SuppressForbidden; -import org.elasticsearch.index.IndexVersion; import org.elasticsearch.monitor.jvm.JvmInfo; import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; @@ -30,7 +29,6 @@ import java.util.Locale; import java.util.Map; import java.util.NavigableMap; -import java.util.Objects; import java.util.TreeMap; public class Version implements VersionId, ToXContentFragment { @@ -51,105 +49,107 @@ public class Version implements VersionId, ToXContentFragment { */ public static final int V_EMPTY_ID = 0; - public static final Version V_EMPTY = new Version(V_EMPTY_ID, IndexVersion.ZERO); - public static final Version V_7_0_0 = new Version(7_00_00_99, IndexVersion.V_7_0_0); - public static final Version V_7_0_1 = new Version(7_00_01_99, IndexVersion.V_7_0_1); - public static final Version V_7_1_0 = new Version(7_01_00_99, IndexVersion.V_7_1_0); - public static final Version V_7_1_1 = new Version(7_01_01_99, IndexVersion.V_7_1_1); - public static final Version V_7_2_0 = new Version(7_02_00_99, IndexVersion.V_7_2_0); - public static final Version V_7_2_1 = new Version(7_02_01_99, IndexVersion.V_7_2_1); - public static final Version V_7_3_0 = new Version(7_03_00_99, IndexVersion.V_7_3_0); - public static final Version V_7_3_1 = new Version(7_03_01_99, IndexVersion.V_7_3_1); - public static final Version V_7_3_2 = new Version(7_03_02_99, IndexVersion.V_7_3_2); - public static final Version V_7_4_0 = new Version(7_04_00_99, IndexVersion.V_7_4_0); - public static final Version V_7_4_1 = new Version(7_04_01_99, IndexVersion.V_7_4_1); - public static final Version V_7_4_2 = new Version(7_04_02_99, IndexVersion.V_7_4_2); - public static final Version V_7_5_0 = new Version(7_05_00_99, IndexVersion.V_7_5_0); - public static final Version V_7_5_1 = new Version(7_05_01_99, IndexVersion.V_7_5_1); - public static final Version V_7_5_2 = new Version(7_05_02_99, IndexVersion.V_7_5_2); - public static final Version V_7_6_0 = new Version(7_06_00_99, IndexVersion.V_7_6_0); - public static final Version V_7_6_1 = new Version(7_06_01_99, IndexVersion.V_7_6_1); - public static final Version V_7_6_2 = new Version(7_06_02_99, IndexVersion.V_7_6_2); - public static final Version V_7_7_0 = new Version(7_07_00_99, IndexVersion.V_7_7_0); - public static final Version V_7_7_1 = new Version(7_07_01_99, IndexVersion.V_7_7_1); - public static final Version V_7_8_0 = new Version(7_08_00_99, IndexVersion.V_7_8_0); - public static final Version V_7_8_1 = new Version(7_08_01_99, IndexVersion.V_7_8_1); - public static final Version V_7_9_0 = new Version(7_09_00_99, IndexVersion.V_7_9_0); - public static final Version V_7_9_1 = new Version(7_09_01_99, IndexVersion.V_7_9_1); - public static final Version V_7_9_2 = new Version(7_09_02_99, IndexVersion.V_7_9_2); - public static final Version V_7_9_3 = new 
Version(7_09_03_99, IndexVersion.V_7_9_3); - public static final Version V_7_10_0 = new Version(7_10_00_99, IndexVersion.V_7_10_0); - public static final Version V_7_10_1 = new Version(7_10_01_99, IndexVersion.V_7_10_1); - public static final Version V_7_10_2 = new Version(7_10_02_99, IndexVersion.V_7_10_2); - public static final Version V_7_11_0 = new Version(7_11_00_99, IndexVersion.V_7_11_0); - public static final Version V_7_11_1 = new Version(7_11_01_99, IndexVersion.V_7_11_1); - public static final Version V_7_11_2 = new Version(7_11_02_99, IndexVersion.V_7_11_2); - public static final Version V_7_12_0 = new Version(7_12_00_99, IndexVersion.V_7_12_0); - public static final Version V_7_12_1 = new Version(7_12_01_99, IndexVersion.V_7_12_1); - public static final Version V_7_13_0 = new Version(7_13_00_99, IndexVersion.V_7_13_0); - public static final Version V_7_13_1 = new Version(7_13_01_99, IndexVersion.V_7_13_1); - public static final Version V_7_13_2 = new Version(7_13_02_99, IndexVersion.V_7_13_2); - public static final Version V_7_13_3 = new Version(7_13_03_99, IndexVersion.V_7_13_3); - public static final Version V_7_13_4 = new Version(7_13_04_99, IndexVersion.V_7_13_4); - public static final Version V_7_14_0 = new Version(7_14_00_99, IndexVersion.V_7_14_0); - public static final Version V_7_14_1 = new Version(7_14_01_99, IndexVersion.V_7_14_1); - public static final Version V_7_14_2 = new Version(7_14_02_99, IndexVersion.V_7_14_2); - public static final Version V_7_15_0 = new Version(7_15_00_99, IndexVersion.V_7_15_0); - public static final Version V_7_15_1 = new Version(7_15_01_99, IndexVersion.V_7_15_1); - public static final Version V_7_15_2 = new Version(7_15_02_99, IndexVersion.V_7_15_2); - public static final Version V_7_16_0 = new Version(7_16_00_99, IndexVersion.V_7_16_0); - public static final Version V_7_16_1 = new Version(7_16_01_99, IndexVersion.V_7_16_1); - public static final Version V_7_16_2 = new Version(7_16_02_99, IndexVersion.V_7_16_2); - public static final Version V_7_16_3 = new Version(7_16_03_99, IndexVersion.V_7_16_3); - public static final Version V_7_17_0 = new Version(7_17_00_99, IndexVersion.V_7_17_0); - public static final Version V_7_17_1 = new Version(7_17_01_99, IndexVersion.V_7_17_1); - public static final Version V_7_17_2 = new Version(7_17_02_99, IndexVersion.V_7_17_2); - public static final Version V_7_17_3 = new Version(7_17_03_99, IndexVersion.V_7_17_3); - public static final Version V_7_17_4 = new Version(7_17_04_99, IndexVersion.V_7_17_4); - public static final Version V_7_17_5 = new Version(7_17_05_99, IndexVersion.V_7_17_5); - public static final Version V_7_17_6 = new Version(7_17_06_99, IndexVersion.V_7_17_6); - public static final Version V_7_17_7 = new Version(7_17_07_99, IndexVersion.V_7_17_7); - public static final Version V_7_17_8 = new Version(7_17_08_99, IndexVersion.V_7_17_8); - public static final Version V_7_17_9 = new Version(7_17_09_99, IndexVersion.V_7_17_9); - public static final Version V_7_17_10 = new Version(7_17_10_99, IndexVersion.V_7_17_10); - public static final Version V_7_17_11 = new Version(7_17_11_99, IndexVersion.V_7_17_11); - public static final Version V_7_17_12 = new Version(7_17_12_99, IndexVersion.V_7_17_12); - public static final Version V_7_17_13 = new Version(7_17_13_99, IndexVersion.V_7_17_13); - public static final Version V_8_0_0 = new Version(8_00_00_99, IndexVersion.V_8_0_0); - public static final Version V_8_0_1 = new Version(8_00_01_99, IndexVersion.V_8_0_1); - public static final Version 
V_8_1_0 = new Version(8_01_00_99, IndexVersion.V_8_1_0); - public static final Version V_8_1_1 = new Version(8_01_01_99, IndexVersion.V_8_1_1); - public static final Version V_8_1_2 = new Version(8_01_02_99, IndexVersion.V_8_1_2); - public static final Version V_8_1_3 = new Version(8_01_03_99, IndexVersion.V_8_1_3); - public static final Version V_8_2_0 = new Version(8_02_00_99, IndexVersion.V_8_2_0); - public static final Version V_8_2_1 = new Version(8_02_01_99, IndexVersion.V_8_2_1); - public static final Version V_8_2_2 = new Version(8_02_02_99, IndexVersion.V_8_2_2); - public static final Version V_8_2_3 = new Version(8_02_03_99, IndexVersion.V_8_2_3); - public static final Version V_8_3_0 = new Version(8_03_00_99, IndexVersion.V_8_3_0); - public static final Version V_8_3_1 = new Version(8_03_01_99, IndexVersion.V_8_3_1); - public static final Version V_8_3_2 = new Version(8_03_02_99, IndexVersion.V_8_3_2); - public static final Version V_8_3_3 = new Version(8_03_03_99, IndexVersion.V_8_3_3); - public static final Version V_8_4_0 = new Version(8_04_00_99, IndexVersion.V_8_4_0); - public static final Version V_8_4_1 = new Version(8_04_01_99, IndexVersion.V_8_4_1); - public static final Version V_8_4_2 = new Version(8_04_02_99, IndexVersion.V_8_4_2); - public static final Version V_8_4_3 = new Version(8_04_03_99, IndexVersion.V_8_4_3); - public static final Version V_8_5_0 = new Version(8_05_00_99, IndexVersion.V_8_5_0); - public static final Version V_8_5_1 = new Version(8_05_01_99, IndexVersion.V_8_5_1); - public static final Version V_8_5_2 = new Version(8_05_02_99, IndexVersion.V_8_5_2); - public static final Version V_8_5_3 = new Version(8_05_03_99, IndexVersion.V_8_5_3); - public static final Version V_8_6_0 = new Version(8_06_00_99, IndexVersion.V_8_6_0); - public static final Version V_8_6_1 = new Version(8_06_01_99, IndexVersion.V_8_6_1); - public static final Version V_8_6_2 = new Version(8_06_02_99, IndexVersion.V_8_6_2); - public static final Version V_8_7_0 = new Version(8_07_00_99, IndexVersion.V_8_7_0); - public static final Version V_8_7_1 = new Version(8_07_01_99, IndexVersion.V_8_7_1); - public static final Version V_8_8_0 = new Version(8_08_00_99, IndexVersion.V_8_8_0); - public static final Version V_8_8_1 = new Version(8_08_01_99, IndexVersion.V_8_8_1); - public static final Version V_8_8_2 = new Version(8_08_02_99, IndexVersion.V_8_8_2); - public static final Version V_8_9_0 = new Version(8_09_00_99, IndexVersion.V_8_9_0); - public static final Version V_8_9_1 = new Version(8_09_01_99, IndexVersion.V_8_9_1); - public static final Version V_8_9_2 = new Version(8_09_02_99, IndexVersion.V_8_9_2); - public static final Version V_8_10_0 = new Version(8_10_00_99, IndexVersion.V_8_10_0); - public static final Version V_8_11_0 = new Version(8_11_00_99, IndexVersion.V_8_11_0); + public static final Version V_EMPTY = new Version(V_EMPTY_ID); + public static final Version V_7_0_0 = new Version(7_00_00_99); + public static final Version V_7_0_1 = new Version(7_00_01_99); + public static final Version V_7_1_0 = new Version(7_01_00_99); + public static final Version V_7_1_1 = new Version(7_01_01_99); + public static final Version V_7_2_0 = new Version(7_02_00_99); + public static final Version V_7_2_1 = new Version(7_02_01_99); + public static final Version V_7_3_0 = new Version(7_03_00_99); + public static final Version V_7_3_1 = new Version(7_03_01_99); + public static final Version V_7_3_2 = new Version(7_03_02_99); + public static final Version V_7_4_0 = new 
Version(7_04_00_99); + public static final Version V_7_4_1 = new Version(7_04_01_99); + public static final Version V_7_4_2 = new Version(7_04_02_99); + public static final Version V_7_5_0 = new Version(7_05_00_99); + public static final Version V_7_5_1 = new Version(7_05_01_99); + public static final Version V_7_5_2 = new Version(7_05_02_99); + public static final Version V_7_6_0 = new Version(7_06_00_99); + public static final Version V_7_6_1 = new Version(7_06_01_99); + public static final Version V_7_6_2 = new Version(7_06_02_99); + public static final Version V_7_7_0 = new Version(7_07_00_99); + public static final Version V_7_7_1 = new Version(7_07_01_99); + public static final Version V_7_8_0 = new Version(7_08_00_99); + public static final Version V_7_8_1 = new Version(7_08_01_99); + public static final Version V_7_9_0 = new Version(7_09_00_99); + public static final Version V_7_9_1 = new Version(7_09_01_99); + public static final Version V_7_9_2 = new Version(7_09_02_99); + public static final Version V_7_9_3 = new Version(7_09_03_99); + public static final Version V_7_10_0 = new Version(7_10_00_99); + public static final Version V_7_10_1 = new Version(7_10_01_99); + public static final Version V_7_10_2 = new Version(7_10_02_99); + public static final Version V_7_11_0 = new Version(7_11_00_99); + public static final Version V_7_11_1 = new Version(7_11_01_99); + public static final Version V_7_11_2 = new Version(7_11_02_99); + public static final Version V_7_12_0 = new Version(7_12_00_99); + public static final Version V_7_12_1 = new Version(7_12_01_99); + public static final Version V_7_13_0 = new Version(7_13_00_99); + public static final Version V_7_13_1 = new Version(7_13_01_99); + public static final Version V_7_13_2 = new Version(7_13_02_99); + public static final Version V_7_13_3 = new Version(7_13_03_99); + public static final Version V_7_13_4 = new Version(7_13_04_99); + public static final Version V_7_14_0 = new Version(7_14_00_99); + public static final Version V_7_14_1 = new Version(7_14_01_99); + public static final Version V_7_14_2 = new Version(7_14_02_99); + public static final Version V_7_15_0 = new Version(7_15_00_99); + public static final Version V_7_15_1 = new Version(7_15_01_99); + public static final Version V_7_15_2 = new Version(7_15_02_99); + public static final Version V_7_16_0 = new Version(7_16_00_99); + public static final Version V_7_16_1 = new Version(7_16_01_99); + public static final Version V_7_16_2 = new Version(7_16_02_99); + public static final Version V_7_16_3 = new Version(7_16_03_99); + public static final Version V_7_17_0 = new Version(7_17_00_99); + public static final Version V_7_17_1 = new Version(7_17_01_99); + public static final Version V_7_17_2 = new Version(7_17_02_99); + public static final Version V_7_17_3 = new Version(7_17_03_99); + public static final Version V_7_17_4 = new Version(7_17_04_99); + public static final Version V_7_17_5 = new Version(7_17_05_99); + public static final Version V_7_17_6 = new Version(7_17_06_99); + public static final Version V_7_17_7 = new Version(7_17_07_99); + public static final Version V_7_17_8 = new Version(7_17_08_99); + public static final Version V_7_17_9 = new Version(7_17_09_99); + public static final Version V_7_17_10 = new Version(7_17_10_99); + public static final Version V_7_17_11 = new Version(7_17_11_99); + public static final Version V_7_17_12 = new Version(7_17_12_99); + public static final Version V_7_17_13 = new Version(7_17_13_99); + public static final Version V_7_17_14 = new 
Version(7_17_14_99); + public static final Version V_8_0_0 = new Version(8_00_00_99); + public static final Version V_8_0_1 = new Version(8_00_01_99); + public static final Version V_8_1_0 = new Version(8_01_00_99); + public static final Version V_8_1_1 = new Version(8_01_01_99); + public static final Version V_8_1_2 = new Version(8_01_02_99); + public static final Version V_8_1_3 = new Version(8_01_03_99); + public static final Version V_8_2_0 = new Version(8_02_00_99); + public static final Version V_8_2_1 = new Version(8_02_01_99); + public static final Version V_8_2_2 = new Version(8_02_02_99); + public static final Version V_8_2_3 = new Version(8_02_03_99); + public static final Version V_8_3_0 = new Version(8_03_00_99); + public static final Version V_8_3_1 = new Version(8_03_01_99); + public static final Version V_8_3_2 = new Version(8_03_02_99); + public static final Version V_8_3_3 = new Version(8_03_03_99); + public static final Version V_8_4_0 = new Version(8_04_00_99); + public static final Version V_8_4_1 = new Version(8_04_01_99); + public static final Version V_8_4_2 = new Version(8_04_02_99); + public static final Version V_8_4_3 = new Version(8_04_03_99); + public static final Version V_8_5_0 = new Version(8_05_00_99); + public static final Version V_8_5_1 = new Version(8_05_01_99); + public static final Version V_8_5_2 = new Version(8_05_02_99); + public static final Version V_8_5_3 = new Version(8_05_03_99); + public static final Version V_8_6_0 = new Version(8_06_00_99); + public static final Version V_8_6_1 = new Version(8_06_01_99); + public static final Version V_8_6_2 = new Version(8_06_02_99); + public static final Version V_8_7_0 = new Version(8_07_00_99); + public static final Version V_8_7_1 = new Version(8_07_01_99); + public static final Version V_8_8_0 = new Version(8_08_00_99); + public static final Version V_8_8_1 = new Version(8_08_01_99); + public static final Version V_8_8_2 = new Version(8_08_02_99); + public static final Version V_8_9_0 = new Version(8_09_00_99); + public static final Version V_8_9_1 = new Version(8_09_01_99); + public static final Version V_8_9_2 = new Version(8_09_02_99); + public static final Version V_8_9_3 = new Version(8_09_03_99); + public static final Version V_8_10_0 = new Version(8_10_00_99); + public static final Version V_8_11_0 = new Version(8_11_00_99); public static final Version CURRENT = V_8_11_0; private static final NavigableMap VERSION_IDS; @@ -214,8 +214,7 @@ public static Version fromId(int id) { } private static Version fromIdSlow(int id) { - // TODO: assume this is an old version that has index version == release version - return new Version(id, IndexVersion.fromId(id)); + return new Version(id); } public static void writeVersion(Version version, StreamOutput out) throws IOException { @@ -298,18 +297,15 @@ private static Version fromStringSlow(String version) { public final byte minor; public final byte revision; public final byte build; - @Deprecated(forRemoval = true) - public final IndexVersion indexVersion; private final String toString; private final int previousMajorId; - Version(int id, IndexVersion indexVersion) { + Version(int id) { this.id = id; this.major = (byte) ((id / 1000000) % 100); this.minor = (byte) ((id / 10000) % 100); this.revision = (byte) ((id / 100) % 100); this.build = (byte) (id % 100); - this.indexVersion = Objects.requireNonNull(indexVersion); this.toString = major + "." + minor + "." + revision; this.previousMajorId = major > 0 ? 
(major - 1) * 1000000 + 99 : major;
     }
@@ -324,11 +320,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         return builder.value(toString());
     }

-    @Deprecated(forRemoval = true)
-    public org.apache.lucene.util.Version luceneVersion() {
-        return indexVersion.luceneVersion();
-    }
-
     /*
      * We need the declared versions when computing the minimum compatibility version. As computing the declared versions uses reflection it
      * is not cheap. Since computing the minimum compatibility version can occur often, we use this holder to compute the declared versions
diff --git a/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java b/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java
index 482f854d98fa0..4800ba191edf7 100644
--- a/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java
+++ b/server/src/main/java/org/elasticsearch/action/ActionListenerResponseHandler.java
@@ -35,8 +35,8 @@ public class ActionListenerResponseHandler<Response extends TransportResponse> i
      * @param executor The executor to use to deserialize the response and notify the listener. You must only use
      *                 {@link EsExecutors#DIRECT_EXECUTOR_SERVICE} (or equivalently {@link TransportResponseHandler#TRANSPORT_WORKER})
      *                 for very performance-critical actions, and even then only if the deserialization and handling work is very cheap,
-     *                 because this executor will perform because this executor will perform all the work for responses from remote nodes on
-     *                 the receiving transport worker itself.
+     *                 because this executor will perform all the work for responses from remote nodes on the receiving transport worker
+     *                 itself.
      */
     public ActionListenerResponseHandler(ActionListener<Response> listener, Writeable.Reader<Response> reader, Executor executor) {
         this.listener = Objects.requireNonNull(listener);
diff --git a/server/src/main/java/org/elasticsearch/action/ActionType.java b/server/src/main/java/org/elasticsearch/action/ActionType.java
index 4e5e1d9b821d3..d24abf049ec1e 100644
--- a/server/src/main/java/org/elasticsearch/action/ActionType.java
+++ b/server/src/main/java/org/elasticsearch/action/ActionType.java
@@ -9,7 +9,6 @@
 package org.elasticsearch.action;

 import org.elasticsearch.common.io.stream.Writeable;
-import org.elasticsearch.transport.TransportRequestOptions;

 /**
  * A generic action. Should strive to make it a singleton.
@@ -42,13 +41,6 @@ public Writeable.Reader<Response> getResponseReader() {
         return responseReader;
     }
-
-    /**
-     * Optional request options for the action.
- */ - public TransportRequestOptions transportOptions() { - return TransportRequestOptions.EMPTY; - } - @Override public boolean equals(Object o) { return o instanceof ActionType && name.equals(((ActionType) o).name()); diff --git a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java index 2226d3be323d6..9ea073a69d4d4 100644 --- a/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java +++ b/server/src/main/java/org/elasticsearch/action/DocWriteResponse.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.action.support.WriteResponse; @@ -118,7 +118,7 @@ public DocWriteResponse(ShardId shardId, String id, long seqNo, long primaryTerm protected DocWriteResponse(ShardId shardId, StreamInput in) throws IOException { super(in); this.shardId = shardId; - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { String type = in.readString(); assert MapperService.SINGLE_MAPPING_NAME.equals(type) : "Expected [_doc] but received [" + type + "]"; } @@ -137,7 +137,7 @@ protected DocWriteResponse(ShardId shardId, StreamInput in) throws IOException { protected DocWriteResponse(StreamInput in) throws IOException { super(in); shardId = new ShardId(in); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { String type = in.readString(); assert MapperService.SINGLE_MAPPING_NAME.equals(type) : "Expected [_doc] but received [" + type + "]"; } @@ -266,7 +266,7 @@ public void writeTo(StreamOutput out) throws IOException { } private void writeWithoutShardId(StreamOutput out) throws IOException { - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); diff --git a/server/src/main/java/org/elasticsearch/action/RoutingMissingException.java b/server/src/main/java/org/elasticsearch/action/RoutingMissingException.java index c316bd589e975..ec43dfdb3fd7f 100644 --- a/server/src/main/java/org/elasticsearch/action/RoutingMissingException.java +++ b/server/src/main/java/org/elasticsearch/action/RoutingMissingException.java @@ -9,7 +9,7 @@ package org.elasticsearch.action; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.mapper.MapperService; @@ -41,7 +41,7 @@ public RestStatus status() { public RoutingMissingException(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { in.readString(); } id = in.readString(); @@ -50,7 +50,7 @@ public RoutingMissingException(StreamInput in) throws IOException { @Override protected void writeTo(StreamOutput out, Writer nestedExceptionsWriter) throws IOException { super.writeTo(out, nestedExceptionsWriter); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if 
(out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java index 3200b41d23fe4..34c1bb4a0c85f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplanation.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.allocation; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.ShardRouting; @@ -68,7 +68,7 @@ public ClusterAllocationExplanation( } public ClusterAllocationExplanation(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_15_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_15_0)) { this.specificShard = in.readBoolean(); } else { this.specificShard = true; // suppress "this is a random shard" warning in BwC situations @@ -82,7 +82,7 @@ public ClusterAllocationExplanation(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_15_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_15_0)) { out.writeBoolean(specificShard); } // else suppress "this is a random shard" warning in BwC situations shardRouting.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponse.java index 168dc5c054dc4..0b5f5fc023dc0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/DesiredBalanceResponse.java @@ -8,6 +8,7 @@ package org.elasticsearch.action.admin.cluster.allocation; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.routing.AllocationId; @@ -37,8 +38,8 @@ public class DesiredBalanceResponse extends ActionResponse implements ChunkedToXContentObject { - private static final TransportVersion CLUSTER_BALANCE_STATS_VERSION = TransportVersion.V_8_7_0; - private static final TransportVersion CLUSTER_INFO_VERSION = TransportVersion.V_8_8_0; + private static final TransportVersion CLUSTER_BALANCE_STATS_VERSION = TransportVersions.V_8_7_0; + private static final TransportVersion CLUSTER_INFO_VERSION = TransportVersions.V_8_8_0; private final DesiredBalanceStats stats; private final ClusterBalanceStats clusterBalanceStats; @@ -76,12 +77,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeMap( routingTable, - StreamOutput::writeString, - (shardsOut, shards) -> shardsOut.writeMap( - shards, - StreamOutput::writeVInt, - (desiredShardsOut, desiredShards) -> desiredShards.writeTo(desiredShardsOut) - ) + (shardsOut, shards) -> shardsOut.writeMap(shards, StreamOutput::writeVInt, 
StreamOutput::writeWriteable)
         );
         if (out.getTransportVersion().onOrAfter(CLUSTER_INFO_VERSION)) {
             out.writeWriteable(clusterInfo);
@@ -164,12 +160,12 @@ public String toString() {
     public record DesiredShards(List<ShardView> current, ShardAssignmentView desired) implements Writeable, ChunkedToXContentObject {

         public static DesiredShards from(StreamInput in) throws IOException {
-            return new DesiredShards(in.readList(ShardView::from), ShardAssignmentView.from(in));
+            return new DesiredShards(in.readCollectionAsList(ShardView::from), ShardAssignmentView.from(in));
         }

         @Override
         public void writeTo(StreamOutput out) throws IOException {
-            out.writeList(current);
+            out.writeCollection(current);
             desired.writeTo(out);
         }
@@ -202,9 +198,9 @@ public record ShardView(
         List<String> tierPreference
     ) implements Writeable, ToXContentObject {

-        private static final TransportVersion ADD_FORECASTS_VERSION = TransportVersion.V_8_7_0;
-        private static final TransportVersion ADD_TIER_PREFERENCE = TransportVersion.V_8_8_0;
-        private static final TransportVersion NULLABLE_RELOCATING_NODE_IS_DESIRED = TransportVersion.V_8_8_0;
+        private static final TransportVersion ADD_FORECASTS_VERSION = TransportVersions.V_8_7_0;
+        private static final TransportVersion ADD_TIER_PREFERENCE = TransportVersions.V_8_8_0;
+        private static final TransportVersion NULLABLE_RELOCATING_NODE_IS_DESIRED = TransportVersions.V_8_8_0;

         public ShardView {
             assert (relocatingNode == null) == (relocatingNodeIsDesired == null)
@@ -231,7 +227,9 @@ public static ShardView from(StreamInput in) throws IOException {
             if (in.getTransportVersion().onOrAfter(ADD_FORECASTS_VERSION) == false) {
                 in.readOptionalWriteable(AllocationId::new);
             }
-            List<String> tierPreference = in.getTransportVersion().onOrAfter(ADD_TIER_PREFERENCE) ? in.readStringList() : List.of();
+            List<String> tierPreference = in.getTransportVersion().onOrAfter(ADD_TIER_PREFERENCE)
+                ?
in.readStringCollectionAsList() + : List.of(); return new ShardView( state, primary, @@ -295,7 +293,7 @@ public record ShardAssignmentView(Set nodeIds, int total, int unassigned public static final ShardAssignmentView EMPTY = new ShardAssignmentView(Set.of(), 0, 0, 0); public static ShardAssignmentView from(StreamInput in) throws IOException { - final var nodeIds = in.readSet(StreamInput::readString); + final var nodeIds = in.readCollectionAsSet(StreamInput::readString); final var total = in.readVInt(); final var unassigned = in.readVInt(); final var ignored = in.readVInt(); @@ -308,7 +306,7 @@ public static ShardAssignmentView from(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeCollection(nodeIds, StreamOutput::writeString); + out.writeStringCollection(nodeIds); out.writeVInt(total); out.writeVInt(unassigned); out.writeVInt(ignored); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java index 01bd97bdc5fb0..75877cf0630f4 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/configuration/AddVotingConfigExclusionsRequest.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.action.admin.cluster.configuration; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.cluster.ClusterState; @@ -72,7 +72,7 @@ public AddVotingConfigExclusionsRequest(String[] nodeIds, String[] nodeNames, Ti public AddVotingConfigExclusionsRequest(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { final String[] legacyNodeDescriptions = in.readStringArray(); if (legacyNodeDescriptions.length > 0) { throw new IllegalArgumentException("legacy [node_name] field was deprecated and must be empty"); @@ -185,7 +185,7 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeStringArray(Strings.EMPTY_ARRAY); } out.writeStringArray(nodeIds); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java index fa95c4a7df69e..4b33a12d68b1f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesRequest.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.cluster.desirednodes; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; @@ -25,7 +26,7 @@ import java.util.Objects; public class 
UpdateDesiredNodesRequest extends AcknowledgedRequest { - private static final TransportVersion DRY_RUN_VERSION = TransportVersion.V_8_4_0; + private static final TransportVersion DRY_RUN_VERSION = TransportVersions.V_8_4_0; private final String historyID; private final long version; @@ -58,7 +59,7 @@ public UpdateDesiredNodesRequest(StreamInput in) throws IOException { super(in); this.historyID = in.readString(); this.version = in.readLong(); - this.nodes = in.readList(DesiredNode::readFrom); + this.nodes = in.readCollectionAsList(DesiredNode::readFrom); if (in.getTransportVersion().onOrAfter(DRY_RUN_VERSION)) { this.dryRun = in.readBoolean(); } else { @@ -71,7 +72,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(historyID); out.writeLong(version); - out.writeList(nodes); + out.writeCollection(nodes); if (out.getTransportVersion().onOrAfter(DRY_RUN_VERSION)) { out.writeBoolean(dryRun); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesResponse.java index 0d99cc9d693ef..1b9b26e978838 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/UpdateDesiredNodesResponse.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.cluster.desirednodes; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -19,7 +20,7 @@ import java.util.Objects; public class UpdateDesiredNodesResponse extends ActionResponse implements ToXContentObject { - private static final TransportVersion DRY_RUN_SUPPORTING_VERSION = TransportVersion.V_8_4_0; + private static final TransportVersion DRY_RUN_SUPPORTING_VERSION = TransportVersions.V_8_4_0; private final boolean replacedExistingHistoryId; private final boolean dryRun; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java index eee3e2f6722ce..309f43c966fee 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java @@ -133,13 +133,17 @@ protected void onPublicationComplete() { final long timeoutInMillis = Math.max(0, endTimeRelativeMillis - threadPool.relativeTimeInMillis()); final TimeValue newTimeout = TimeValue.timeValueMillis(timeoutInMillis); request.timeout(newTimeout); - executeHealth( - task, - request, - clusterService.state(), - listener, - waitCount, - observedState -> waitForEventsAndExecuteHealth(task, request, listener, waitCount, endTimeRelativeMillis) + + // Move the heavy work off of the master service and back onto a MANAGEMENT thread. 
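+            // ('executor' is expected to be a MANAGEMENT-pool executor here; sendResponse below asserts this,
+            // since building the health response can be expensive on large clusters.)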
+ executor.execute( + () -> executeHealth( + task, + request, + clusterService.state(), + listener, + waitCount, + observedState -> waitForEventsAndExecuteHealth(task, request, listener, waitCount, endTimeRelativeMillis) + ) ); } @@ -167,20 +171,26 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) // applier service has a different view of the cluster state from the one supplied here final ClusterState appliedState = clusterService.state(); assert newState.stateUUID().equals(appliedState.stateUUID()) : newState.stateUUID() + " vs " + appliedState.stateUUID(); - executeHealth( - task, - request, - appliedState, - listener, - waitCount, - observedState -> waitForEventsAndExecuteHealth(task, request, listener, waitCount, endTimeRelativeMillis) + + // Move the heavy work off of the master service and back onto a MANAGEMENT thread. + executor.execute( + () -> executeHealth( + task, + request, + appliedState, + listener, + waitCount, + observedState -> waitForEventsAndExecuteHealth(task, request, listener, waitCount, endTimeRelativeMillis) + ) ); } @Override public void onFailure(Exception e) { if (e instanceof ProcessClusterEventTimeoutException) { - sendResponse(task, request, clusterService.state(), waitCount, TimeoutState.TIMED_OUT, listener); + executor.execute( + () -> sendResponse(task, request, clusterService.state(), waitCount, TimeoutState.TIMED_OUT, listener) + ); } else { final Level level = isExpectedFailure(e) ? Level.TRACE : Level.ERROR; logger.log(level, () -> "unexpected failure during [" + source + "]", e); @@ -236,7 +246,7 @@ private void executeHealth( final ClusterStateObserver.Listener stateListener = new ClusterStateObserver.Listener() { @Override public void onNewClusterState(ClusterState newState) { - onNewClusterStateAfterDelay.accept(newState); + executor.execute(() -> onNewClusterStateAfterDelay.accept(newState)); } @Override @@ -246,7 +256,9 @@ public void onClusterServiceClose() { @Override public void onTimeout(TimeValue timeout) { - sendResponse(task, request, observer.setAndGetObservedState(), waitCount, TimeoutState.TIMED_OUT, listener); + executor.execute( + () -> sendResponse(task, request, observer.setAndGetObservedState(), waitCount, TimeoutState.TIMED_OUT, listener) + ); } }; observer.waitForNextChange(stateListener, validationPredicate, request.timeout()); @@ -301,6 +313,10 @@ private void sendResponse( final TimeoutState timeoutState, final ActionListener listener ) { + // Creating the ClusterHealthResponse below can be computationally heavy. Ensure this thread is not running on a time-critical + // thread, like the master service or cluster state update applier threads. 
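+        // (As with any Java assert, the following check runs only when assertions are enabled, e.g. under -ea in tests.)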
+ assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.MANAGEMENT); + ActionListener.completeWith(listener, () -> { task.ensureNotCancelled(); ClusterHealthResponse response = clusterHealth( diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java index 4c2b97af9bd26..29846fab10977 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/GetFeatureUpgradeStatusResponse.java @@ -48,7 +48,7 @@ public GetFeatureUpgradeStatusResponse(List statuses, Upgr */ public GetFeatureUpgradeStatusResponse(StreamInput in) throws IOException { super(in); - this.featureUpgradeStatuses = in.readImmutableList(FeatureUpgradeStatus::new); + this.featureUpgradeStatuses = in.readCollectionAsImmutableList(FeatureUpgradeStatus::new); this.upgradeStatus = in.readEnum(UpgradeStatus.class); } @@ -67,7 +67,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(this.featureUpgradeStatuses); + out.writeCollection(this.featureUpgradeStatuses); out.writeEnum(upgradeStatus); } @@ -154,7 +154,7 @@ public FeatureUpgradeStatus(StreamInput in) throws IOException { this.featureName = in.readString(); this.minimumIndexVersion = IndexVersion.readVersion(in); this.upgradeStatus = in.readEnum(UpgradeStatus.class); - this.indexInfos = in.readImmutableList(IndexInfo::new); + this.indexInfos = in.readCollectionAsImmutableList(IndexInfo::new); } public String getFeatureName() { @@ -178,7 +178,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(this.featureName); IndexVersion.writeVersion(this.minimumIndexVersion, out); out.writeEnum(this.upgradeStatus); - out.writeList(this.indexInfos); + out.writeCollection(this.indexInfos); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/PostFeatureUpgradeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/PostFeatureUpgradeResponse.java index d7ea624d3b9c2..ca3771654de81 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/PostFeatureUpgradeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/PostFeatureUpgradeResponse.java @@ -59,7 +59,7 @@ public PostFeatureUpgradeResponse( public PostFeatureUpgradeResponse(StreamInput in) throws IOException { super(in); this.accepted = in.readBoolean(); - this.features = in.readImmutableList(Feature::new); + this.features = in.readCollectionAsImmutableList(Feature::new); this.reason = in.readOptionalString(); this.elasticsearchException = in.readOptionalWriteable(ElasticsearchException::new); } @@ -91,7 +91,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(this.accepted); - out.writeList(this.features); + out.writeCollection(this.features); out.writeOptionalString(this.reason); out.writeOptionalWriteable(this.elasticsearchException); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java index e87427c3fdef6..054d6c7b1f6cc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.node.hotthreads; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.support.nodes.BaseNodesRequest; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; @@ -36,7 +36,7 @@ public NodesHotThreadsRequest(StreamInput in) throws IOException { type = HotThreads.ReportType.of(in.readString()); interval = in.readTimeValue(); snapshots = in.readInt(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { sortOrder = HotThreads.SortOrder.of(in.readString()); } } @@ -118,7 +118,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(type.getTypeValue()); out.writeTimeValue(interval); out.writeInt(snapshots); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { out.writeString(sortOrder.getOrderValue()); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java index edf7d88ae4a9a..333629d4f522b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/hotthreads/NodesHotThreadsResponse.java @@ -47,12 +47,12 @@ public Iterator> getTextChunks() { @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeHotThreads::new); + return in.readCollectionAsList(NodeHotThreads::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } private static class LinesIterator implements Iterator { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java index 063ba3cd62688..35dc876b3a585 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodeInfo.java @@ -10,6 +10,7 @@ import org.elasticsearch.Build; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -58,7 +59,7 @@ public class NodeInfo extends BaseNodeResponse { public NodeInfo(StreamInput in) throws IOException { super(in); version = Version.readVersion(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { transportVersion = TransportVersion.readVersion(in); } else { transportVersion = TransportVersion.fromId(version.id); @@ -82,10 +83,10 @@ public 
NodeInfo(StreamInput in) throws IOException { addInfoIfNonNull(HttpInfo.class, in.readOptionalWriteable(HttpInfo::new)); addInfoIfNonNull(PluginsAndModules.class, in.readOptionalWriteable(PluginsAndModules::new)); addInfoIfNonNull(IngestInfo.class, in.readOptionalWriteable(IngestInfo::new)); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { addInfoIfNonNull(AggregationInfo.class, in.readOptionalWriteable(AggregationInfo::new)); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { addInfoIfNonNull(RemoteClusterServerInfo.class, in.readOptionalWriteable(RemoteClusterServerInfo::new)); } } @@ -196,7 +197,7 @@ private void addInfoIfNonNull(Class clazz, public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); Version.writeVersion(version, out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { TransportVersion.writeVersion(transportVersion, out); } Build.writeBuild(build, out); @@ -220,10 +221,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(getInfo(HttpInfo.class)); out.writeOptionalWriteable(getInfo(PluginsAndModules.class)); out.writeOptionalWriteable(getInfo(IngestInfo.class)); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { out.writeOptionalWriteable(getInfo(AggregationInfo.class)); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeOptionalWriteable(getInfo(RemoteClusterServerInfo.class)); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java index a7f4e054bf638..42bdec20d358b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoRequest.java @@ -109,7 +109,7 @@ public NodesInfoRequest removeMetric(String metric) { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeStringArray(requestedMetrics.toArray(String[]::new)); + out.writeStringCollection(requestedMetrics); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java index f756f0e7929f1..2b97ee38daa9f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/NodesInfoResponse.java @@ -44,12 +44,12 @@ public NodesInfoResponse(ClusterName clusterName, List nodes, List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeInfo::new); + return in.readCollectionAsList(NodeInfo::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java index e6a91b152cad5..3691b3b432f3a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginsAndModules.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.node.info; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.node.ReportingService; @@ -35,18 +35,18 @@ public PluginsAndModules(List plugins, List } public PluginsAndModules(StreamInput in) throws IOException { - this.plugins = in.readImmutableList(PluginRuntimeInfo::new); - this.modules = in.readImmutableList(PluginDescriptor::new); + this.plugins = in.readCollectionAsImmutableList(PluginRuntimeInfo::new); + this.modules = in.readCollectionAsImmutableList(PluginDescriptor::new); } @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { - out.writeList(plugins); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { + out.writeCollection(plugins); } else { - out.writeList(plugins.stream().map(PluginRuntimeInfo::descriptor).toList()); + out.writeCollection(plugins.stream().map(PluginRuntimeInfo::descriptor).toList()); } - out.writeList(modules); + out.writeCollection(modules); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java index d9ab1f6484833..9fbf643bfa8fc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/reload/NodesReloadSecureSettingsResponse.java @@ -40,12 +40,12 @@ public NodesReloadSecureSettingsResponse(ClusterName clusterName, List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeResponse::new); + return in.readCollectionAsList(NodeResponse::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodePrevalidateShardPathRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodePrevalidateShardPathRequest.java index 29fca1810bf23..f04610ea28a7d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodePrevalidateShardPathRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodePrevalidateShardPathRequest.java @@ -31,13 +31,13 @@ public NodePrevalidateShardPathRequest(Collection shardIds) { public NodePrevalidateShardPathRequest(StreamInput in) throws IOException { super(in); - this.shardIds = in.readImmutableSet(ShardId::new); + this.shardIds = in.readCollectionAsImmutableSet(ShardId::new); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeCollection(shardIds, (o, value) -> value.writeTo(o)); + out.writeCollection(shardIds); } public Set getShardIds() { diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodePrevalidateShardPathResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodePrevalidateShardPathResponse.java index 763f39bb1d93a..cc0728260b30e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodePrevalidateShardPathResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodePrevalidateShardPathResponse.java @@ -29,7 +29,7 @@ protected NodePrevalidateShardPathResponse(DiscoveryNode node, Set shar protected NodePrevalidateShardPathResponse(StreamInput in) throws IOException { super(in); - shardIds = in.readImmutableSet(ShardId::new); + shardIds = in.readCollectionAsImmutableSet(ShardId::new); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesRemovalPrevalidation.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesRemovalPrevalidation.java index 4f6ea0d5b83d8..e87222e2f914a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesRemovalPrevalidation.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/NodesRemovalPrevalidation.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.node.shutdown; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -45,14 +45,14 @@ static void configureParser(ConstructingObjectParser parser) { } public static NodesRemovalPrevalidation readFrom(final StreamInput in) throws IOException { - return new NodesRemovalPrevalidation(in.readBoolean(), in.readString(), in.readList(NodeResult::readFrom)); + return new NodesRemovalPrevalidation(in.readBoolean(), in.readString(), in.readCollectionAsList(NodeResult::readFrom)); } @Override public void writeTo(final StreamOutput out) throws IOException { out.writeBoolean(isSafe); out.writeString(message); - out.writeList(nodes); + out.writeCollection(nodes); } @Override @@ -148,14 +148,14 @@ static void configureParser(ConstructingObjectParser parser) { @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isSafe); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { reason.writeTo(out); } out.writeString(message); } public static Result readFrom(final StreamInput in) throws IOException { - if (in.getTransportVersion().before(TransportVersion.V_8_7_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_7_0)) { return new Result(in.readBoolean(), null, in.readString()); } return new Result(in.readBoolean(), Reason.readFrom(in), in.readString()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java index 5073090567936..acffb014715dd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateNodeRemovalRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.node.shutdown; -import 
org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeReadRequest; import org.elasticsearch.common.Strings; @@ -44,7 +44,7 @@ public PrevalidateNodeRemovalRequest(final StreamInput in) throws IOException { names = in.readStringArray(); ids = in.readStringArray(); externalIds = in.readStringArray(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { timeout = in.readTimeValue(); } } @@ -55,7 +55,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(names); out.writeStringArray(ids); out.writeStringArray(externalIds); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { out.writeTimeValue(timeout); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateShardPathRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateShardPathRequest.java index 3172df98cfc00..18464346ad889 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateShardPathRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateShardPathRequest.java @@ -29,7 +29,7 @@ public PrevalidateShardPathRequest(Set shardIds, String... nodeIds) { public PrevalidateShardPathRequest(StreamInput in) throws IOException { super(in); - this.shardIds = in.readImmutableSet(ShardId::new); + this.shardIds = in.readCollectionAsImmutableSet(ShardId::new); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateShardPathResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateShardPathResponse.java index 2bbaa6eb28270..0267d299fe57a 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateShardPathResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/shutdown/PrevalidateShardPathResponse.java @@ -33,11 +33,11 @@ public PrevalidateShardPathResponse(StreamInput in) throws IOException { @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodePrevalidateShardPathResponse::new); + return in.readCollectionAsList(NodePrevalidateShardPathResponse::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java index dce305cb840db..b8d1a431f92e8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodeStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.node.stats; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.support.nodes.BaseNodeResponse; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; @@ -117,7 +117,7 @@ public NodeStats(StreamInput in) throws IOException { 
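// Note the version gate a few lines below: peers on wire versions before V_8_500_020 never send repositoriesStats, so it is left null for them.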
ingestStats = in.readOptionalWriteable(IngestStats::read); adaptiveSelectionStats = in.readOptionalWriteable(AdaptiveSelectionStats::new); indexingPressureStats = in.readOptionalWriteable(IndexingPressureStats::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_011)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { repositoriesStats = in.readOptionalWriteable(RepositoriesStats::new); } else { repositoriesStats = null; @@ -294,7 +294,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(ingestStats); out.writeOptionalWriteable(adaptiveSelectionStats); out.writeOptionalWriteable(indexingPressureStats); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_011)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeOptionalWriteable(repositoriesStats); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java index be1cb83c52afb..0fd4df0eec3a1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequest.java @@ -10,6 +10,7 @@ import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.action.support.nodes.BaseNodesRequest; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.tasks.CancellableTask; @@ -42,7 +43,7 @@ public NodesStatsRequest(StreamInput in) throws IOException { indices = new CommonStatsFlags(in); requestedMetrics.clear(); - requestedMetrics.addAll(in.readStringList()); + requestedMetrics.addAll(in.readStringCollectionAsList()); } /** @@ -148,16 +149,31 @@ public NodesStatsRequest removeMetric(String metric) { return this; } + @Override + public String getDescription() { + return Strings.format( + "nodes=%s, metrics=%s, flags=%s", + Arrays.toString(nodesIds()), + requestedMetrics.toString(), + Arrays.toString(indices.getFlags()) + ); + } + @Override public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - return new CancellableTask(id, type, action, "", parentTaskId, headers); + return new CancellableTask(id, type, action, "", parentTaskId, headers) { + @Override + public String getDescription() { + return NodesStatsRequest.this.getDescription(); + } + }; } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); indices.writeTo(out); - out.writeStringArray(requestedMetrics.toArray(String[]::new)); + out.writeStringCollection(requestedMetrics); } /** diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java index 29c62c638d929..c99629bbdfd62 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsResponse.java @@ -34,12 +34,12 @@ public NodesStatsResponse(ClusterName clusterName, List nodes, List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeStats::new); + return in.readCollectionAsList(NodeStats::new); } @Override protected 
void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java index 473180d0173e4..53e6ca19bbe84 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/stats/TransportNodesStatsAction.java @@ -116,7 +116,12 @@ public NodeStatsRequest(StreamInput in) throws IOException { @Override public Task createTask(long id, String type, String action, TaskId parentTaskId, Map headers) { - return new CancellableTask(id, type, action, "", parentTaskId, headers); + return new CancellableTask(id, type, action, "", parentTaskId, headers) { + @Override + public String getDescription() { + return request.getDescription(); + } + }; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java index dbafbea57c7ef..bc6394c245d8b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/get/TransportGetTaskAction.java @@ -166,7 +166,7 @@ void getRunningTaskFromNode(Task thisTask, GetTaskRequest request, ActionListene future.addTimeout( requireNonNullElse(request.getTimeout(), DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT), threadPool, - ThreadPool.Names.SAME + EsExecutors.DIRECT_EXECUTOR_SERVICE ); } else { TaskInfo info = runningTask.taskInfo(clusterService.localNode().getId(), true); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java index 597c9821e48ec..f42f138288de0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.node.tasks.list; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.common.Strings; @@ -43,7 +43,7 @@ public ListTasksRequest(StreamInput in) throws IOException { super(in); detailed = in.readBoolean(); waitForCompletion = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_13_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_13_0)) { descriptions = in.readStringArray(); } } @@ -53,7 +53,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeBoolean(detailed); out.writeBoolean(waitForCompletion); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_13_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_13_0)) { out.writeStringArray(descriptions); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java index d9e1790cd38ef..7d883ad60b4e7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/ListTasksResponse.java @@ -60,13 +60,13 @@ public ListTasksResponse( public ListTasksResponse(StreamInput in) throws IOException { super(in); - tasks = in.readImmutableList(TaskInfo::from); + tasks = in.readCollectionAsImmutableList(TaskInfo::from); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeList(tasks); + out.writeCollection(tasks); } protected static ConstructingObjectParser setupParser( diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java index 4cf75c9951225..eda1c4ebab08d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/tasks/list/TransportListTasksAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.AbstractRefCounted; import org.elasticsearch.core.RefCounted; import org.elasticsearch.core.TimeValue; @@ -142,7 +143,7 @@ protected void processTasks(CancellableTask nodeTask, ListTasksRequest request, future.addTimeout( requireNonNullElse(request.getTimeout(), DEFAULT_WAIT_FOR_COMPLETION_TIMEOUT), threadPool, - ThreadPool.Names.SAME + EsExecutors.DIRECT_EXECUTOR_SERVICE ); nodeTask.addListener(() -> future.onFailure(new TaskCancelledException("task cancelled"))); } else { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageResponse.java index 136c99aca2145..0ce0bcfb884eb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/node/usage/NodesUsageResponse.java @@ -36,12 +36,12 @@ public NodesUsageResponse(ClusterName clusterName, List nodes, List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeUsage::new); + return in.readCollectionAsList(NodeUsage::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesAction.java index e324b2a8433e3..5898f258865d7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteClusterNodesAction.java @@ -79,12 +79,12 @@ public Response(List nodes) { public Response(StreamInput in) throws IOException { super(in); - this.nodes = in.readList(DiscoveryNode::new); + this.nodes = in.readCollectionAsList(DiscoveryNode::new); } 
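// Mirrors the reader above: writeCollection emits a vint length followed by each DiscoveryNode, which readCollectionAsList consumes.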
@Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } public List getNodes() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoResponse.java index 1c9c0233102dd..89e1ede46d9a8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/remote/RemoteInfoResponse.java @@ -25,7 +25,7 @@ public final class RemoteInfoResponse extends ActionResponse implements ToXConte RemoteInfoResponse(StreamInput in) throws IOException { super(in); - infos = in.readImmutableList(RemoteConnectionInfo::new); + infos = in.readCollectionAsImmutableList(RemoteConnectionInfo::new); } public RemoteInfoResponse(Collection infos) { @@ -38,7 +38,7 @@ public List getInfos() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(infos); + out.writeCollection(infos); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java index 7c90585b17356..354c67cfb416b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/repositories/verify/VerifyRepositoryResponse.java @@ -117,7 +117,7 @@ public VerifyRepositoryResponse() {} public VerifyRepositoryResponse(StreamInput in) throws IOException { super(in); - this.nodes = in.readList(NodeView::new); + this.nodes = in.readCollectionAsList(NodeView::new); } public VerifyRepositoryResponse(DiscoveryNode[] nodes) { @@ -130,7 +130,7 @@ public VerifyRepositoryResponse(List nodes) { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } public List getNodes() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java index 76d59660da0b4..0e868087b637d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/settings/ClusterGetSettingsAction.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.settings; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; @@ -37,12 +37,12 @@ public Request() {} public Request(StreamInput in) throws IOException { super(in); - assert in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0); + assert in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0); } @Override public void writeTo(StreamOutput out) throws IOException { - assert out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0); + assert out.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0); super.writeTo(out); } @@ -77,7 +77,7 @@ public int hashCode() { public Response(StreamInput in) throws IOException { super(in); - assert 
in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0); + assert in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0); persistentSettings = Settings.readSettingsFromStream(in); transientSettings = Settings.readSettingsFromStream(in); settings = Settings.readSettingsFromStream(in); @@ -91,7 +91,7 @@ public Response(Settings persistentSettings, Settings transientSettings, Setting @Override public void writeTo(StreamOutput out) throws IOException { - assert out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0); + assert out.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0); persistentSettings.writeTo(out); transientSettings.writeTo(out); settings.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java index 9c127b6360f90..4781c06c1e2eb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsResponse.java @@ -37,7 +37,7 @@ public ClusterSearchShardsResponse(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeArray(groups); out.writeArray(nodes); - out.writeMap(indicesAndFilters, StreamOutput::writeString, (o, s) -> s.writeTo(o)); + out.writeMap(indicesAndFilters, StreamOutput::writeWriteable); } public ClusterSearchShardsResponse( diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java index 65ca9bce89721..47acc90d4a09b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/clone/CloneSnapshotRequest.java @@ -139,11 +139,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field("source", source); builder.field("target", target); if (indices != null) { - builder.startArray("indices"); - for (String index : indices) { - builder.value(index); - } - builder.endArray(); + builder.array("indices", indices); } if (indicesOptions != null) { indicesOptions.toXContent(builder, params); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java index e0085a45cd3ca..7c5d11a884d60 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/CreateSnapshotRequest.java @@ -10,6 +10,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -54,7 +55,7 @@ public class CreateSnapshotRequest extends MasterNodeRequest features) { public GetSnapshottableFeaturesResponse(StreamInput in) throws IOException { super(in); - snapshottableFeatures = in.readImmutableList(SnapshottableFeature::new); + snapshottableFeatures = 
in.readCollectionAsImmutableList(SnapshottableFeature::new); } public List getSnapshottableFeatures() { @@ -38,7 +38,7 @@ public List getSnapshottableFeatures() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(snapshottableFeatures); + out.writeCollection(snapshottableFeatures); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateRequest.java index fff36f9bc6872..2c6769f5edd57 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateRequest.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.cluster.snapshots.features; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.common.io.stream.StreamInput; @@ -19,7 +20,7 @@ /** Request for resetting feature state */ public class ResetFeatureStateRequest extends MasterNodeRequest { - private static final TransportVersion FEATURE_RESET_ON_MASTER = TransportVersion.V_7_14_0; + private static final TransportVersion FEATURE_RESET_ON_MASTER = TransportVersions.V_7_14_0; public static ResetFeatureStateRequest fromStream(StreamInput in) throws IOException { if (in.getTransportVersion().before(FEATURE_RESET_ON_MASTER)) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateResponse.java index 413b905dff738..d0deab97b35e1 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/features/ResetFeatureStateResponse.java @@ -43,7 +43,7 @@ public ResetFeatureStateResponse(List resetFeatureState public ResetFeatureStateResponse(StreamInput in) throws IOException { super(in); - this.resetFeatureStateStatusList = in.readList(ResetFeatureStateStatus::new); + this.resetFeatureStateStatusList = in.readCollectionAsList(ResetFeatureStateStatus::new); } /** @@ -69,7 +69,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(this.resetFeatureStateStatusList); + out.writeCollection(this.resetFeatureStateStatusList); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java index 35b6abed2c2f9..c3e2dd6e3b536 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsRequest.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.cluster.snapshots.get; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; 
import org.elasticsearch.common.Strings; @@ -40,19 +41,19 @@ public class GetSnapshotsRequest extends MasterNodeRequest public static final String NO_POLICY_PATTERN = "_none"; public static final boolean DEFAULT_VERBOSE_MODE = true; - public static final TransportVersion SLM_POLICY_FILTERING_VERSION = TransportVersion.V_7_16_0; + public static final TransportVersion SLM_POLICY_FILTERING_VERSION = TransportVersions.V_7_16_0; - public static final TransportVersion FROM_SORT_VALUE_VERSION = TransportVersion.V_7_16_0; + public static final TransportVersion FROM_SORT_VALUE_VERSION = TransportVersions.V_7_16_0; - public static final TransportVersion MULTIPLE_REPOSITORIES_SUPPORT_ADDED = TransportVersion.V_7_14_0; + public static final TransportVersion MULTIPLE_REPOSITORIES_SUPPORT_ADDED = TransportVersions.V_7_14_0; - public static final TransportVersion PAGINATED_GET_SNAPSHOTS_VERSION = TransportVersion.V_7_14_0; + public static final TransportVersion PAGINATED_GET_SNAPSHOTS_VERSION = TransportVersions.V_7_14_0; - public static final TransportVersion NUMERIC_PAGINATION_VERSION = TransportVersion.V_7_15_0; + public static final TransportVersion NUMERIC_PAGINATION_VERSION = TransportVersions.V_7_15_0; - private static final TransportVersion SORT_BY_SHARDS_OR_REPO_VERSION = TransportVersion.V_7_16_0; + private static final TransportVersion SORT_BY_SHARDS_OR_REPO_VERSION = TransportVersions.V_7_16_0; - private static final TransportVersion INDICES_FLAG_VERSION = TransportVersion.V_8_3_0; + private static final TransportVersion INDICES_FLAG_VERSION = TransportVersions.V_8_3_0; public static final int NO_LIMIT = -1; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java index 9e6e486aecdff..3257ed1b986c3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java @@ -92,7 +92,7 @@ public GetSnapshotsResponse( } public GetSnapshotsResponse(StreamInput in) throws IOException { - this.snapshots = in.readImmutableList(SnapshotInfo::readFrom); + this.snapshots = in.readCollectionAsImmutableList(SnapshotInfo::readFrom); if (in.getTransportVersion().onOrAfter(GetSnapshotsRequest.MULTIPLE_REPOSITORIES_SUPPORT_ADDED)) { final Map failedResponses = in.readMap(StreamInput::readException); this.failures = Collections.unmodifiableMap(failedResponses); @@ -148,9 +148,9 @@ public int remaining() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(snapshots); + out.writeCollection(snapshots); if (out.getTransportVersion().onOrAfter(GetSnapshotsRequest.MULTIPLE_REPOSITORIES_SUPPORT_ADDED)) { - out.writeMap(failures, StreamOutput::writeString, StreamOutput::writeException); + out.writeMap(failures, StreamOutput::writeException); out.writeOptionalString(next); } else { if (failures.isEmpty() == false) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequest.java index d3050f34ea1bc..d8fd55451cc63 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequest.java +++ 
b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotRequest.java @@ -38,7 +38,7 @@ public class GetShardSnapshotRequest extends MasterNodeRequest err.writeTo(o)); + out.writeMap(repositoryFailures, StreamOutput::writeWriteable); } public Optional getFailureForRepository(String repository) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java index 259a70fa6ff36..8d025653d47fe 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotRequest.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.cluster.snapshots.restore; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeRequest; @@ -48,7 +49,7 @@ public class RestoreSnapshotRequest extends MasterNodeRequest 0) { - builder.startArray("feature_states"); - for (String plugin : featureStates) { - builder.value(plugin); - } - builder.endArray(); + builder.array("feature_states", featureStates); } builder.field("include_global_state", includeGlobalState); builder.field("partial", partial); @@ -611,11 +604,7 @@ private void toXContentFragment(XContentBuilder builder, Params params) throws I } builder.endObject(); } - builder.startArray("ignore_index_settings"); - for (String ignoreIndexSetting : ignoreIndexSettings) { - builder.value(ignoreIndexSetting); - } - builder.endArray(); + builder.array("ignore_index_settings", ignoreIndexSettings); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java index 788867123a88c..956ce57d168e0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java @@ -63,7 +63,7 @@ public class SnapshotStatus implements ChunkedToXContentObject, Writeable { SnapshotStatus(StreamInput in) throws IOException { snapshot = new Snapshot(in); state = State.fromValue(in.readByte()); - shards = in.readImmutableList(SnapshotIndexShardStatus::new); + shards = in.readCollectionAsImmutableList(SnapshotIndexShardStatus::new); includeGlobalState = in.readOptionalBoolean(); final long startTime = in.readLong(); final long time = in.readLong(); @@ -164,7 +164,7 @@ public Map getIndices() { public void writeTo(StreamOutput out) throws IOException { snapshot.writeTo(out); out.writeByte(state.value()); - out.writeList(shards); + out.writeCollection(shards); out.writeOptionalBoolean(includeGlobalState); out.writeLong(stats.getStartTime()); out.writeLong(stats.getTime()); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java index a8dd1d54d3a03..578800edfb691 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotsStatusResponse.java @@ -34,7 +34,7 @@ public class SnapshotsStatusResponse extends ActionResponse implements ChunkedTo public SnapshotsStatusResponse(StreamInput in) throws IOException { super(in); - snapshots = in.readImmutableList(SnapshotStatus::new); + snapshots = in.readCollectionAsImmutableList(SnapshotStatus::new); } SnapshotsStatusResponse(List snapshots) { @@ -52,7 +52,7 @@ public List getSnapshots() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(snapshots); + out.writeCollection(snapshots); } private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java index 658cfe297e2f5..2c70146ad7f3e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java @@ -159,12 +159,12 @@ public NodesSnapshotStatus(ClusterName clusterName, List nod @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeSnapshotStatus::new); + return in.readCollectionAsList(NodeSnapshotStatus::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } } @@ -174,7 +174,7 @@ public static class NodeRequest extends TransportRequest { public NodeRequest(StreamInput in) throws IOException { super(in); - snapshots = in.readList(Snapshot::new); + snapshots = in.readCollectionAsList(Snapshot::new); } NodeRequest(TransportNodesSnapshotsStatus.Request request) { @@ -184,7 +184,7 @@ public NodeRequest(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeList(snapshots); + out.writeCollection(snapshots); } } @@ -210,11 +210,7 @@ public Map> status() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); if (status != null) { - out.writeMap( - status, - (o, s) -> s.writeTo(o), - (output, v) -> output.writeMap(v, (o, shardId) -> shardId.writeTo(o), (o, sis) -> sis.writeTo(o)) - ); + out.writeMap(status, StreamOutput::writeWriteable, StreamOutput::writeMap); } else { out.writeVInt(0); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index f23ee6242b5c8..c33bc841190a0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.master.TransportMasterNodeReadAction; @@ -26,6 +25,7 @@ import 
org.elasticsearch.cluster.metadata.Metadata.Custom; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; @@ -138,9 +138,9 @@ public void onTimeout(TimeValue timeout) { } } - @SuppressForbidden(reason = "exposing ClusterState#transportVersions requires reading them") - private static Map getTransportVersions(ClusterState clusterState) { - return clusterState.transportVersions(); + @SuppressForbidden(reason = "exposing ClusterState#compatibilityVersions requires reading them") + private static Map getCompatibilityVersions(ClusterState clusterState) { + return clusterState.compatibilityVersions(); } private ClusterStateResponse buildResponse(final ClusterStateRequest request, final ClusterState currentState) { @@ -151,7 +151,7 @@ private ClusterStateResponse buildResponse(final ClusterStateRequest request, fi if (request.nodes()) { builder.nodes(currentState.nodes()); - builder.transportVersions(getTransportVersions(currentState)); + builder.compatibilityVersions(getCompatibilityVersions(currentState)); } if (request.routingTable()) { if (request.indices().length > 0) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java index 592d31e38fcf0..f8d894e4de48b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/AnalysisStats.java @@ -36,7 +36,7 @@ import java.util.Set; import java.util.TreeMap; -import static org.elasticsearch.TransportVersion.V_8_500_045; +import static org.elasticsearch.TransportVersions.V_8_500_045; /** * Statistics about analysis usage. 
@@ -275,14 +275,14 @@ private static Set sort(Collection set) { } public AnalysisStats(StreamInput input) throws IOException { - usedCharFilters = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new))); - usedTokenizers = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new))); - usedTokenFilters = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new))); - usedAnalyzers = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new))); - usedBuiltInCharFilters = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new))); - usedBuiltInTokenizers = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new))); - usedBuiltInTokenFilters = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new))); - usedBuiltInAnalyzers = Collections.unmodifiableSet(new LinkedHashSet<>(input.readList(IndexFeatureStats::new))); + usedCharFilters = Collections.unmodifiableSet(new LinkedHashSet<>(input.readCollectionAsList(IndexFeatureStats::new))); + usedTokenizers = Collections.unmodifiableSet(new LinkedHashSet<>(input.readCollectionAsList(IndexFeatureStats::new))); + usedTokenFilters = Collections.unmodifiableSet(new LinkedHashSet<>(input.readCollectionAsList(IndexFeatureStats::new))); + usedAnalyzers = Collections.unmodifiableSet(new LinkedHashSet<>(input.readCollectionAsList(IndexFeatureStats::new))); + usedBuiltInCharFilters = Collections.unmodifiableSet(new LinkedHashSet<>(input.readCollectionAsList(IndexFeatureStats::new))); + usedBuiltInTokenizers = Collections.unmodifiableSet(new LinkedHashSet<>(input.readCollectionAsList(IndexFeatureStats::new))); + usedBuiltInTokenFilters = Collections.unmodifiableSet(new LinkedHashSet<>(input.readCollectionAsList(IndexFeatureStats::new))); + usedBuiltInAnalyzers = Collections.unmodifiableSet(new LinkedHashSet<>(input.readCollectionAsList(IndexFeatureStats::new))); if (input.getTransportVersion().onOrAfter(SYNONYM_SETS_VERSION)) { usedSynonyms = input.readImmutableMap(SynonymsStats::new); } else { @@ -301,7 +301,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(usedBuiltInTokenFilters); out.writeCollection(usedBuiltInAnalyzers); if (out.getTransportVersion().onOrAfter(SYNONYM_SETS_VERSION)) { - out.writeMap(usedSynonyms, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + out.writeMap(usedSynonyms, StreamOutput::writeWriteable); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java index f233cf57961c8..b5acc3cffdb34 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsNodeResponse.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.stats; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.stats.NodeStats; import org.elasticsearch.action.admin.indices.stats.ShardStats; @@ -38,7 +38,7 @@ public ClusterStatsNodeResponse(StreamInput in) throws IOException { this.nodeInfo = new NodeInfo(in); this.nodeStats = new NodeStats(in); shardsStats = in.readArray(ShardStats::new, 
ShardStats[]::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { searchUsageStats = new SearchUsageStats(in); } else { searchUsageStats = new SearchUsageStats(); @@ -101,7 +101,7 @@ public void writeTo(StreamOutput out) throws IOException { nodeInfo.writeTo(out); nodeStats.writeTo(out); out.writeArray(shardsStats); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { searchUsageStats.writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java index 080edf8810d39..be77447cb1738 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/ClusterStatsResponse.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.stats; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.nodes.BaseNodesResponse; import org.elasticsearch.cluster.ClusterName; @@ -43,12 +43,12 @@ public ClusterStatsResponse(StreamInput in) throws IOException { MappingStats mappingStats = in.readOptionalWriteable(MappingStats::new); AnalysisStats analysisStats = in.readOptionalWriteable(AnalysisStats::new); VersionStats versionStats = null; - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) { versionStats = in.readOptionalWriteable(VersionStats::new); } this.clusterUUID = clusterUUID; - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { clusterSnapshotStats = ClusterSnapshotStats.readFrom(in); } else { clusterSnapshotStats = ClusterSnapshotStats.EMPTY; @@ -115,23 +115,23 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(clusterUUID); out.writeOptionalWriteable(indicesStats.getMappings()); out.writeOptionalWriteable(indicesStats.getAnalysis()); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) { out.writeOptionalWriteable(indicesStats.getVersions()); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { clusterSnapshotStats.writeTo(out); } } @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(ClusterStatsNodeResponse::readNodeResponse); + return in.readCollectionAsList(ClusterStatsNodeResponse::readNodeResponse); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { // nodeStats and indicesStats are rebuilt from nodes - out.writeList(nodes); + out.writeCollection(nodes); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/FieldStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/FieldStats.java index 0ca37fe9e9852..1341297526abf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/FieldStats.java +++ 
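Most hunks in this change are the same mechanical migration of the StreamInput/StreamOutput collection helpers to their renamed counterparts: readList becomes readCollectionAsList, readSet becomes readCollectionAsSet, readImmutableList becomes readCollectionAsImmutableList, readStringList becomes readStringCollectionAsList, and writeList becomes writeCollection (with writeStringCollection replacing writeCollection plus a writeString reference). A minimal sketch of the new spellings, using a hypothetical class that is not part of this change:

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    import java.io.IOException;
    import java.util.List;
    import java.util.Set;

    // Hypothetical example, not part of this change.
    public class ExampleStats implements Writeable {
        private final List<String> names;
        private final Set<String> langs;

        public ExampleStats(StreamInput in) throws IOException {
            names = in.readStringCollectionAsList();                 // was: in.readStringList()
            langs = in.readCollectionAsSet(StreamInput::readString); // was: in.readSet(StreamInput::readString)
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeStringCollection(names); // was: out.writeCollection(names, StreamOutput::writeString)
            out.writeStringCollection(langs);
        }
    }

The wire format is unchanged; only the names move from List-specific to Collection-generic.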
b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/FieldStats.java @@ -34,7 +34,7 @@ public class FieldStats extends IndexFeatureStats { FieldStats(StreamInput in) throws IOException { super(in); scriptCount = in.readVInt(); - scriptLangs = in.readSet(StreamInput::readString); + scriptLangs = in.readCollectionAsSet(StreamInput::readString); fieldScriptStats = new FieldScriptStats(in); } @@ -42,7 +42,7 @@ public class FieldStats extends IndexFeatureStats { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeVInt(scriptCount); - out.writeCollection(scriptLangs, StreamOutput::writeString); + out.writeStringCollection(scriptLangs); fieldScriptStats.writeTo(out); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java index bc0ed5e70c59a..197a5d839eecf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/MappingStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.cluster.stats; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.metadata.Metadata; @@ -207,7 +207,7 @@ private static int countOccurrences(String script, Pattern pattern) { } MappingStats(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { totalFieldCount = in.readOptionalVLong(); totalDeduplicatedFieldCount = in.readOptionalVLong(); totalMappingSizeBytes = in.readOptionalVLong(); @@ -216,13 +216,13 @@ private static int countOccurrences(String script, Pattern pattern) { totalDeduplicatedFieldCount = null; totalMappingSizeBytes = null; } - fieldTypeStats = in.readImmutableList(FieldStats::new); - runtimeFieldStats = in.readImmutableList(RuntimeFieldStats::new); + fieldTypeStats = in.readCollectionAsImmutableList(FieldStats::new); + runtimeFieldStats = in.readCollectionAsImmutableList(RuntimeFieldStats::new); } @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { out.writeOptionalVLong(totalFieldCount); out.writeOptionalVLong(totalDeduplicatedFieldCount); out.writeOptionalVLong(totalMappingSizeBytes); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RuntimeFieldStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RuntimeFieldStats.java index de9c3e6a0dd54..6c182eaf2ad05 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RuntimeFieldStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/RuntimeFieldStats.java @@ -38,7 +38,7 @@ public RuntimeFieldStats(StreamInput in) throws IOException { this.type = in.readString(); this.count = in.readInt(); this.indexCount = in.readInt(); - this.scriptLangs = in.readSet(StreamInput::readString); + this.scriptLangs = in.readCollectionAsSet(StreamInput::readString); this.scriptLessCount = in.readLong(); this.shadowedCount = in.readLong(); this.fieldScriptStats = new FieldScriptStats(in); @@ -53,7 +53,7 @@ public void 
writeTo(StreamOutput out) throws IOException { out.writeString(type); out.writeInt(count); out.writeInt(indexCount); - out.writeCollection(scriptLangs, StreamOutput::writeString); + out.writeStringCollection(scriptLangs); out.writeLong(scriptLessCount); out.writeLong(shadowedCount); fieldScriptStats.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStats.java index fa60016e93f94..372ca49a252c8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/SearchUsageStats.java @@ -58,8 +58,8 @@ public SearchUsageStats(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(queries, StreamOutput::writeString, StreamOutput::writeLong); - out.writeMap(sections, StreamOutput::writeString, StreamOutput::writeLong); + out.writeMap(queries, StreamOutput::writeLong); + out.writeMap(sections, StreamOutput::writeLong); out.writeVLong(totalSearchCount); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/VersionStats.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/VersionStats.java index e0c2e511c6879..703a650489bbd 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/VersionStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/stats/VersionStats.java @@ -99,7 +99,7 @@ public static VersionStats of(Metadata metadata, List } VersionStats(StreamInput in) throws IOException { - this.versionStats = Collections.unmodifiableSet(new TreeSet<>(in.readList(SingleVersionStats::new))); + this.versionStats = Collections.unmodifiableSet(new TreeSet<>(in.readCollectionAsList(SingleVersionStats::new))); } public Set versionStats() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java index bde84b84eb0e7..d9eeb07a53c2c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/tasks/PendingClusterTasksResponse.java @@ -26,7 +26,7 @@ public class PendingClusterTasksResponse extends ActionResponse implements Chunk public PendingClusterTasksResponse(StreamInput in) throws IOException { super(in); - pendingTasks = in.readList(PendingClusterTask::new); + pendingTasks = in.readCollectionAsList(PendingClusterTask::new); } PendingClusterTasksResponse(List pendingTasks) { @@ -91,7 +91,7 @@ static final class Fields { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(pendingTasks); + out.writeCollection(pendingTasks); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java index e9f49efd4cb1c..499698593a14b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/alias/IndicesAliasesRequest.java @@ -61,7 +61,7 @@ public class IndicesAliasesRequest extends AcknowledgedRequest> aliases, Map i.readList(AliasMetadata::new)); - dataStreamAliases = 
in.readMap(in1 -> in1.readList(DataStreamAlias::new)); + aliases = in.readImmutableOpenMap(StreamInput::readString, i -> i.readCollectionAsList(AliasMetadata::new)); + dataStreamAliases = in.readMap(in1 -> in1.readCollectionAsList(DataStreamAlias::new)); } public Map> getAliases() { @@ -45,8 +45,8 @@ public Map> getDataStreamAliases() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(aliases, StreamOutput::writeString, StreamOutput::writeList); - out.writeMap(dataStreamAliases, StreamOutput::writeString, StreamOutput::writeList); + out.writeMap(aliases, StreamOutput::writeCollection); + out.writeMap(dataStreamAliases, StreamOutput::writeCollection); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java index c7d77e7d39517..664c2559cf2fa 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeAction.java @@ -78,8 +78,8 @@ public Request() {} text = in.readStringArray(); analyzer = in.readOptionalString(); tokenizer = in.readOptionalWriteable(NameOrDefinition::new); - tokenFilters.addAll(in.readList(NameOrDefinition::new)); - charFilters.addAll(in.readList(NameOrDefinition::new)); + tokenFilters.addAll(in.readCollectionAsList(NameOrDefinition::new)); + charFilters.addAll(in.readCollectionAsList(NameOrDefinition::new)); field = in.readOptionalString(); explain = in.readBoolean(); attributes = in.readStringArray(); @@ -251,8 +251,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(text); out.writeOptionalString(analyzer); out.writeOptionalWriteable(tokenizer); - out.writeList(tokenFilters); - out.writeList(charFilters); + out.writeCollection(tokenFilters); + out.writeCollection(charFilters); out.writeOptionalString(field); out.writeBoolean(explain); out.writeStringArray(attributes); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java index e09829b455dd5..26432922c1f24 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersRequest.java @@ -17,7 +17,7 @@ import java.util.Arrays; import java.util.Objects; -import static org.elasticsearch.TransportVersion.V_8_500_034; +import static org.elasticsearch.TransportVersions.V_8_500_034; /** * Request for reloading index search analyzers diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponse.java index 0e6149476659c..4f388804f2340 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/ReloadAnalyzersResponse.java @@ -125,7 +125,7 @@ public static ReloadAnalyzersResponse fromXContent(XContentParser parser) { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeMap(reloadDetails, StreamOutput::writeString, (stream, details) -> details.writeTo(stream)); + out.writeMap(reloadDetails, 
StreamOutput::writeWriteable); } @Override @@ -159,8 +159,8 @@ public ReloadDetails(String name, Set reloadedIndicesNodes, Set ReloadDetails(StreamInput in) throws IOException { this.indexName = in.readString(); - this.reloadedIndicesNodes = new HashSet<>(in.readList(StreamInput::readString)); - this.reloadedAnalyzers = new HashSet<>(in.readList(StreamInput::readString)); + this.reloadedIndicesNodes = new HashSet<>(in.readCollectionAsList(StreamInput::readString)); + this.reloadedAnalyzers = new HashSet<>(in.readCollectionAsList(StreamInput::readString)); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportReloadAnalyzersAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportReloadAnalyzersAction.java index cd1198916a384..8625f0f4207a0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportReloadAnalyzersAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/analyze/TransportReloadAnalyzersAction.java @@ -144,7 +144,7 @@ private ReloadResult(String index, String nodeId, List reloadedSearchAna private ReloadResult(StreamInput in) throws IOException { this.index = in.readString(); this.nodeId = in.readString(); - this.reloadedSearchAnalyzers = in.readStringList(); + this.reloadedSearchAnalyzers = in.readStringCollectionAsList(); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java index 44f743b3da4aa..1c02ac868cfc7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/close/CloseIndexResponse.java @@ -32,7 +32,7 @@ public class CloseIndexResponse extends ShardsAcknowledgedResponse { CloseIndexResponse(StreamInput in) throws IOException { super(in, true); - indices = in.readImmutableList(IndexResult::new); + indices = in.readCollectionAsImmutableList(IndexResult::new); } public CloseIndexResponse(final boolean acknowledged, final boolean shardsAcknowledged, final List indices) { @@ -48,7 +48,7 @@ public List getIndices() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeShardsAcknowledged(out); - out.writeList(indices); + out.writeCollection(indices); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java index a54f6720511f7..136f261dc3ef3 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/create/CreateIndexRequest.java @@ -10,7 +10,7 @@ import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.indices.alias.Alias; @@ -81,7 +81,7 @@ public CreateIndexRequest(StreamInput in) throws IOException { cause = in.readString(); index = in.readString(); settings = readSettingsFromStream(in); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if 
(in.getTransportVersion().before(TransportVersions.V_8_0_0)) { int size = in.readVInt(); assert size <= 1 : "Expected to read 0 or 1 mappings, but received " + size; if (size == 1) { @@ -99,7 +99,7 @@ public CreateIndexRequest(StreamInput in) throws IOException { aliases.add(new Alias(in)); } waitForActiveShards = ActiveShardCount.readFrom(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_12_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_12_0)) { origin = in.readString(); } } @@ -452,7 +452,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(cause); out.writeString(index); settings.writeTo(out); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { if ("{}".equals(mappings)) { out.writeVInt(0); } else { @@ -465,7 +465,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeCollection(aliases); waitForActiveShards.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_12_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_12_0)) { out.writeString(origin); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/FindDanglingIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/FindDanglingIndexResponse.java index 55472e999e72d..c782abed32190 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/FindDanglingIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/FindDanglingIndexResponse.java @@ -37,11 +37,11 @@ public FindDanglingIndexResponse( @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeFindDanglingIndexResponse::new); + return in.readCollectionAsList(NodeFindDanglingIndexResponse::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/NodeFindDanglingIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/NodeFindDanglingIndexResponse.java index 3e13c5b905dd7..30d9bb701c65c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/NodeFindDanglingIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/find/NodeFindDanglingIndexResponse.java @@ -40,12 +40,12 @@ public NodeFindDanglingIndexResponse(DiscoveryNode node, List dan protected NodeFindDanglingIndexResponse(StreamInput in) throws IOException { super(in); - this.danglingIndexInfo = in.readList(IndexMetadata::readFrom); + this.danglingIndexInfo = in.readCollectionAsList(IndexMetadata::readFrom); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeList(this.danglingIndexInfo); + out.writeCollection(this.danglingIndexInfo); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java index 138df5f15881c..b61b6318a20b6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java +++ 
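The before/onOrAfter checks in this hunk are the standard wire-compatibility pattern, now written against the TransportVersions constants holder instead of constants nested on TransportVersion. A minimal sketch of the pattern; the class, the field, and the chosen version are hypothetical:

    import org.elasticsearch.TransportVersions;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    import java.io.IOException;

    // Hypothetical wire object whose 'origin' field only exists on newer wire formats.
    public class ExampleRequest implements Writeable {
        private final String origin;

        public ExampleRequest(StreamInput in) throws IOException {
            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
                origin = in.readString(); // peers on or after this version send the field
            } else {
                origin = "";              // default when reading from an older peer
            }
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
                out.writeString(origin);  // only written when the peer can read it back
            }
        }
    }

Read and write sides must gate on the same version, as every hunk in these files does.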
b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/ListDanglingIndicesResponse.java @@ -94,12 +94,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeListDanglingIndicesResponse::new); + return in.readCollectionAsList(NodeListDanglingIndicesResponse::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } // visible for testing diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/NodeListDanglingIndicesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/NodeListDanglingIndicesResponse.java index d5cde54bdc87a..f56dadd6a2482 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/NodeListDanglingIndicesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/dangling/list/NodeListDanglingIndicesResponse.java @@ -34,12 +34,12 @@ public NodeListDanglingIndicesResponse(DiscoveryNode node, List v.writeTo(o)); + out.writeMap(stats, StreamOutput::writeWriteable); } Map getStats() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageStats.java index 6052543939819..12abb10be4aba 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/diskusage/IndexDiskUsageStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.diskusage; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -61,7 +61,7 @@ public IndexDiskUsageStats(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(fields, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + out.writeMap(fields, StreamOutput::writeWriteable); out.writeVLong(indexSizeInBytes); } @@ -189,7 +189,7 @@ private PerFieldDiskUsage(StreamInput in) throws IOException { pointsBytes = in.readVLong(); normsBytes = in.readVLong(); termVectorsBytes = in.readVLong(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { knnVectorsBytes = in.readVLong(); } } @@ -202,7 +202,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(pointsBytes); out.writeVLong(normsBytes); out.writeVLong(termVectorsBytes); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { out.writeVLong(knnVectorsBytes); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java index 6dca3285269ed..0bd4458056c20 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/flush/TransportShardFlushAction.java @@ -8,7 +8,7 @@ package 
org.elasticsearch.action.admin.indices.flush; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.ActionFilters; @@ -103,7 +103,7 @@ private static class PreShardSyncedFlushRequest extends TransportRequest { private PreShardSyncedFlushRequest(StreamInput in) throws IOException { super(in); - assert in.getTransportVersion().before(TransportVersion.V_8_0_0) : "received pre_sync request from a new node"; + assert in.getTransportVersion().before(TransportVersions.V_8_0_0) : "received pre_sync request from a new node"; this.shardId = new ShardId(in); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java index 7f77b0da0f688..241f1a0c7fbf6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/forcemerge/ForceMergeRequest.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.indices.forcemerge; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.broadcast.BroadcastRequest; import org.elasticsearch.common.UUIDs; @@ -47,7 +48,7 @@ public static final class Defaults { */ private boolean shouldStoreResult; - private static final TransportVersion FORCE_MERGE_UUID_SIMPLE_VERSION = TransportVersion.V_8_0_0; + private static final TransportVersion FORCE_MERGE_UUID_SIMPLE_VERSION = TransportVersions.V_8_0_0; /** * Force merge UUID to store in the live commit data of a shard under diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java index bd9e827545b9e..90fe22d0ee9d7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/get/GetIndexResponse.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.get; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; @@ -75,7 +75,7 @@ public GetIndexResponse( GetIndexResponse(StreamInput in) throws IOException { super(in); this.indices = in.readStringArray(); - mappings = in.readImmutableOpenMap(StreamInput::readString, in.getTransportVersion().before(TransportVersion.V_8_0_0) ? i -> { + mappings = in.readImmutableOpenMap(StreamInput::readString, in.getTransportVersion().before(TransportVersions.V_8_0_0) ? i -> { int numMappings = i.readVInt(); assert numMappings == 0 || numMappings == 1 : "Expected 0 or 1 mappings but got " + numMappings; if (numMappings == 1) { @@ -87,7 +87,7 @@ public GetIndexResponse( } } : i -> i.readBoolean() ? 
new MappingMetadata(i) : MappingMetadata.EMPTY_MAPPINGS); - aliases = in.readImmutableOpenMap(StreamInput::readString, i -> i.readList(AliasMetadata::new)); + aliases = in.readImmutableOpenMap(StreamInput::readString, i -> i.readCollectionAsList(AliasMetadata::new)); settings = in.readImmutableOpenMap(StreamInput::readString, Settings::readSettingsFromStream); defaultSettings = in.readImmutableOpenMap(StreamInput::readString, Settings::readSettingsFromStream); dataStreams = in.readImmutableOpenMap(StreamInput::readString, StreamInput::readOptionalString); @@ -173,10 +173,10 @@ public String getSetting(String index, String setting) { public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(indices); MappingMetadata.writeMappingMetadata(out, mappings); - out.writeMap(aliases, StreamOutput::writeString, StreamOutput::writeList); - out.writeMap(settings, StreamOutput::writeString, (o, v) -> v.writeTo(o)); - out.writeMap(defaultSettings, StreamOutput::writeString, (o, v) -> v.writeTo(o)); - out.writeMap(dataStreams, StreamOutput::writeString, StreamOutput::writeOptionalString); + out.writeMap(aliases, StreamOutput::writeCollection); + out.writeMap(settings, StreamOutput::writeWriteable); + out.writeMap(defaultSettings, StreamOutput::writeWriteable); + out.writeMap(dataStreams, StreamOutput::writeOptionalString); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java index 47a17006dc4bf..b4ab91a52c97f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetFieldMappingsIndexRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.mapping.get; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.support.IndicesOptions; @@ -28,12 +28,12 @@ public class GetFieldMappingsIndexRequest extends SingleShardRequest { - if (mapIn.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (mapIn.getTransportVersion().before(TransportVersions.V_8_0_0)) { int typesSize = mapIn.readVInt(); assert typesSize == 1 || typesSize == 0 : "Expected 0 or 1 types but got " + typesSize; if (typesSize == 0) { @@ -153,12 +153,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(mappings, StreamOutput::writeString, (outpt, map) -> { - if (outpt.getTransportVersion().before(TransportVersion.V_8_0_0)) { + out.writeMap(mappings, (outpt, map) -> { + if (outpt.getTransportVersion().before(TransportVersions.V_8_0_0)) { outpt.writeVInt(1); outpt.writeString(MapperService.SINGLE_MAPPING_NAME); } - outpt.writeMap(map, StreamOutput::writeString, (o, v) -> { + outpt.writeMap(map, (o, v) -> { o.writeString(v.fullName()); o.writeBytesReference(v.source); }); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java index 55210e3e5a4b9..f013d2bb518d5 100644 --- 
a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/get/GetMappingsResponse.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.mapping.get; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.common.Strings; @@ -40,7 +40,7 @@ public GetMappingsResponse(Map mappings) { GetMappingsResponse(StreamInput in) throws IOException { super(in); - mappings = in.readImmutableMap(in.getTransportVersion().before(TransportVersion.V_8_0_0) ? i -> { + mappings = in.readImmutableMap(in.getTransportVersion().before(TransportVersions.V_8_0_0) ? i -> { int mappingCount = i.readVInt(); assert mappingCount == 1 || mappingCount == 0 : "Expected 0 or 1 mappings but got " + mappingCount; if (mappingCount == 1) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java index 4d1a10e1bed25..eb758a40da5fe 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/mapping/put/PutMappingRequest.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.indices.mapping.put; import org.elasticsearch.ElasticsearchGenerationException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -79,7 +79,7 @@ public PutMappingRequest(StreamInput in) throws IOException { super(in); indices = in.readStringArray(); indicesOptions = IndicesOptions.readIndicesOptions(in); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { String type = in.readOptionalString(); if (MapperService.SINGLE_MAPPING_NAME.equals(type) == false) { throw new IllegalArgumentException("Expected type [_doc] but received [" + type + "]"); @@ -88,7 +88,7 @@ public PutMappingRequest(StreamInput in) throws IOException { source = in.readString(); concreteIndex = in.readOptionalWriteable(Index::new); origin = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_9_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) { writeIndexOnly = in.readBoolean(); } } @@ -311,13 +311,13 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArrayNullable(indices); indicesOptions.writeIndicesOptions(out); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(source); out.writeOptionalWriteable(concreteIndex); out.writeOptionalString(origin); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_9_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) { out.writeBoolean(writeIndexOnly); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/AddIndexBlockResponse.java 
b/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/AddIndexBlockResponse.java index 38bd9b6432abc..d63be14406a7f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/AddIndexBlockResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/readonly/AddIndexBlockResponse.java @@ -33,7 +33,7 @@ public class AddIndexBlockResponse extends ShardsAcknowledgedResponse { AddIndexBlockResponse(StreamInput in) throws IOException { super(in, true); - indices = in.readImmutableList(AddBlockResult::new); + indices = in.readCollectionAsImmutableList(AddBlockResult::new); } public AddIndexBlockResponse(final boolean acknowledged, final boolean shardsAcknowledged, final List indices) { @@ -49,7 +49,7 @@ public List getIndices() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeShardsAcknowledged(out); - out.writeList(indices); + out.writeCollection(indices); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java index a3b9432756b1f..4e58fbab4bd7c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/recovery/RecoveryResponse.java @@ -91,7 +91,7 @@ public Iterator toXContentChunked(ToXContent.Params params) { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeMapOfLists(shardRecoveryStates, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + out.writeMap(shardRecoveryStates, StreamOutput::writeCollection); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java index 6c1ba44125550..e86edfc53436f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/resolve/ResolveIndexAction.java @@ -390,9 +390,9 @@ public Response(List indices, List aliases, List getIndices() { @@ -409,9 +409,9 @@ public List getDataStreams() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(indices); - out.writeList(aliases); - out.writeList(dataStreams); + out.writeCollection(indices); + out.writeCollection(aliases); + out.writeCollection(dataStreams); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardDocsCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardDocsCondition.java index 678ec96c217ca..40579b1acf361 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardDocsCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MaxPrimaryShardDocsCondition.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.indices.rollover; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; @@ -63,6 +64,6 @@ public static MaxPrimaryShardDocsCondition fromXContent(XContentParser parser) t @Override boolean includedInVersion(TransportVersion version) { - 
return version.onOrAfter(TransportVersion.V_8_2_0); + return version.onOrAfter(TransportVersions.V_8_2_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java index a2b9fa6c04f89..42f9cbd5a970b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MetadataRolloverService.java @@ -32,6 +32,9 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.indices.SystemDataStreamDescriptor; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.snapshots.SnapshotInProgressException; @@ -291,9 +294,10 @@ private RolloverResult rolloverDataStream( currentState, createIndexClusterStateRequest, silent, - (builder, indexMetadata) -> builder.put( - dataStream.rollover(indexMetadata.getIndex(), newGeneration, metadata.isTimeSeriesTemplate(templateV2)) - ), + (builder, indexMetadata) -> { + downgradeBrokenTsdbBackingIndices(dataStream, builder); + builder.put(dataStream.rollover(indexMetadata.getIndex(), newGeneration, metadata.isTimeSeriesTemplate(templateV2))); + }, rerouteCompletionIsNotRequired() ); @@ -312,6 +316,30 @@ private RolloverResult rolloverDataStream( return new RolloverResult(newWriteIndexName, originalWriteIndex.getName(), newState); } + /** + * Before rollover, this method fixes tsdb backing indices that have no start and end time index settings set, + * by removing the index.mode and index.routing_path index settings. This downgrades such indices to regular + * indices. Due to a bug, data streams may exist whose backing indices are missing these settings. + * Note that as part of rollover the new backing index will be in tsdb mode. 
+ */ + private static void downgradeBrokenTsdbBackingIndices(DataStream dataStream, Metadata.Builder builder) { + for (Index indexName : dataStream.getIndices()) { + var index = builder.getSafe(indexName); + final Settings originalSettings = index.getSettings(); + if (IndexVersion.V_8_11_0.after(index.getCreationVersion()) + && index.getIndexMode() == IndexMode.TIME_SERIES + && originalSettings.keySet().contains(IndexSettings.TIME_SERIES_START_TIME.getKey()) == false + && originalSettings.keySet().contains(IndexSettings.TIME_SERIES_END_TIME.getKey()) == false) { + final Settings.Builder settingsBuilder = Settings.builder().put(originalSettings); + settingsBuilder.remove(IndexSettings.MODE.getKey()); + settingsBuilder.remove(IndexMetadata.INDEX_ROUTING_PATH.getKey()); + long newVersion = index.getSettingsVersion() + 1; + builder.put(IndexMetadata.builder(index).settings(settingsBuilder.build()).settingsVersion(newVersion)); + } + } + } + public Metadata.Builder withShardSizeForecastForWriteIndex(String dataStreamName, Metadata.Builder metadata) { final DataStream dataStream = metadata.dataStream(dataStreamName); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinAgeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinAgeCondition.java index 98958d3b015c7..5f55d01a0ab43 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinAgeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinAgeCondition.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.indices.rollover; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.TimeValue; @@ -65,6 +66,6 @@ public static MinAgeCondition fromXContent(XContentParser parser) throws IOExcep @Override boolean includedInVersion(TransportVersion version) { - return version.onOrAfter(TransportVersion.V_8_4_0); + return version.onOrAfter(TransportVersions.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinDocsCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinDocsCondition.java index 8c6274cfadb81..28af3df8c7e22 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinDocsCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinDocsCondition.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.indices.rollover; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; @@ -63,6 +64,6 @@ public static MinDocsCondition fromXContent(XContentParser parser) throws IOExce @Override boolean includedInVersion(TransportVersion version) { - return version.onOrAfter(TransportVersion.V_8_4_0); + return version.onOrAfter(TransportVersions.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardDocsCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardDocsCondition.java index 6aaea57e5b55b..c89b34345db8c 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardDocsCondition.java +++ 
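To make the downgrade above concrete, the sketch below shows roughly what happens to the settings of an affected backing index. The class, the main method, and the routing-path value are invented for illustration; the setting keys are the ones the new method checks (index.time_series.start_time / index.time_series.end_time) and removes (index.mode, index.routing_path):

    import org.elasticsearch.common.settings.Settings;

    class TsdbDowngradeSketch {
        public static void main(String[] args) {
            // A pre-8.11 tsdb backing index hit by the bug: time-series mode and a
            // routing path, but neither time-bound setting.
            Settings broken = Settings.builder()
                .put("index.mode", "time_series")
                .put("index.routing_path", "dimensions.*") // value invented for the example
                .build();

            // What downgradeBrokenTsdbBackingIndices effectively does to such an
            // index (it also bumps the settings version by one):
            Settings.Builder builder = Settings.builder().put(broken);
            builder.remove("index.mode");          // IndexSettings.MODE.getKey()
            builder.remove("index.routing_path");  // IndexMetadata.INDEX_ROUTING_PATH.getKey()
            Settings downgraded = builder.build(); // now a regular, non-tsdb index
        }
    }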
b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardDocsCondition.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.indices.rollover; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; @@ -63,6 +64,6 @@ public static MinPrimaryShardDocsCondition fromXContent(XContentParser parser) t @Override boolean includedInVersion(TransportVersion version) { - return version.onOrAfter(TransportVersion.V_8_4_0); + return version.onOrAfter(TransportVersions.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardSizeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardSizeCondition.java index d7149e2a91be4..7ef63186c76a8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardSizeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinPrimaryShardSizeCondition.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.indices.rollover; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.ByteSizeValue; @@ -64,6 +65,6 @@ public static MinPrimaryShardSizeCondition fromXContent(XContentParser parser) t @Override boolean includedInVersion(TransportVersion version) { - return version.onOrAfter(TransportVersion.V_8_4_0); + return version.onOrAfter(TransportVersions.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinSizeCondition.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinSizeCondition.java index 52db7ff90cf26..318f385842707 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinSizeCondition.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/MinSizeCondition.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.indices.rollover; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.ByteSizeValue; @@ -64,6 +65,6 @@ public static MinSizeCondition fromXContent(XContentParser parser) throws IOExce @Override boolean includedInVersion(TransportVersion version) { - return version.onOrAfter(TransportVersion.V_8_4_0); + return version.onOrAfter(TransportVersions.V_8_4_0); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditions.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditions.java index 628ccb75f543f..8a886e5875c19 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditions.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverConditions.java @@ -115,9 +115,8 @@ public boolean hasMinConditions() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeCollection( - conditions.values().stream().filter(c -> c.includedInVersion(out.getTransportVersion())).toList(), - StreamOutput::writeNamedWriteable + 
out.writeNamedWriteableCollection( + conditions.values().stream().filter(c -> c.includedInVersion(out.getTransportVersion())).toList() ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverConfiguration.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverConfiguration.java index ad5237eac56b3..5e0ca49c138bb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverConfiguration.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverConfiguration.java @@ -65,13 +65,13 @@ public RolloverConfiguration(RolloverConditions concreteConditions) { } public RolloverConfiguration(StreamInput in) throws IOException { - this(new RolloverConditions(in), in.readSet(StreamInput::readString)); + this(new RolloverConditions(in), in.readCollectionAsSet(StreamInput::readString)); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeWriteable(concreteConditions); - out.writeCollection(automaticConditions, StreamOutput::writeString); + out.writeStringCollection(automaticConditions); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverInfo.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverInfo.java index 710794cf9c87b..318d4b7eecb54 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverInfo.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverInfo.java @@ -61,7 +61,7 @@ public RolloverInfo(String alias, List> metConditions, long time) { public RolloverInfo(StreamInput in) throws IOException { this.alias = in.readString(); this.time = in.readVLong(); - this.metConditions = (List) in.readNamedWriteableList(Condition.class); + this.metConditions = (List) in.readNamedWriteableCollectionAsList(Condition.class); } public static RolloverInfo parse(XContentParser parser, String alias) { @@ -88,7 +88,7 @@ public static Diff readDiffFrom(StreamInput in) throws IOException public void writeTo(StreamOutput out) throws IOException { out.writeString(alias); out.writeVLong(time); - out.writeNamedWriteableList(metConditions); + out.writeNamedWriteableCollection(metConditions); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java index b9416fbfa2164..008a379ecbadb 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/rollover/RolloverResponse.java @@ -120,7 +120,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(oldIndex); out.writeString(newIndex); - out.writeMap(conditionStatus, StreamOutput::writeString, StreamOutput::writeBoolean); + out.writeMap(conditionStatus, StreamOutput::writeBoolean); out.writeBoolean(dryRun); out.writeBoolean(rolledOver); out.writeBoolean(shardsAcknowledged); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequest.java index 204474654bb0d..03a222bd0db4f 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequest.java +++ 
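The writeMap rewrites here and in the neighbouring files rely on two shortcuts: the two-argument overload writes String keys implicitly, and StreamOutput::writeWriteable stands in for the (o, v) -> v.writeTo(o) lambda wherever the values are Writeable. A hypothetical fragment showing both shapes:

    import org.elasticsearch.action.admin.indices.rollover.RolloverInfo;
    import org.elasticsearch.common.io.stream.StreamOutput;

    import java.io.IOException;
    import java.util.Map;

    // Hypothetical helper; only the call shapes matter.
    class WriteMapSketch {
        static void writeMaps(StreamOutput out, Map<String, Boolean> flags, Map<String, RolloverInfo> infos) throws IOException {
            out.writeMap(flags, StreamOutput::writeBoolean);   // String keys implied by the 2-arg overload
            out.writeMap(infos, StreamOutput::writeWriteable); // replaces (o, v) -> v.writeTo(o)
        }
    }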
b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/IndicesSegmentsRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.segments; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.support.broadcast.BroadcastRequest; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -28,7 +28,7 @@ public IndicesSegmentsRequest() { public IndicesSegmentsRequest(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { in.readBoolean(); // old 'verbose' option, since removed } } @@ -40,7 +40,7 @@ public IndicesSegmentsRequest(String... indices) { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeBoolean(false); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java index e782de4b56b43..bdb94c7b11f02 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/segments/ShardSegments.java @@ -31,7 +31,7 @@ public class ShardSegments implements Writeable, Iterable { ShardSegments(StreamInput in) throws IOException { shardRouting = new ShardRouting(in); - segments = in.readList(Segment::new); + segments = in.readCollectionAsList(Segment::new); } @Override @@ -70,6 +70,6 @@ public int getNumberOfSearch() { @Override public void writeTo(StreamOutput out) throws IOException { shardRouting.writeTo(out); - out.writeList(segments); + out.writeCollection(segments); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java index c3bb5dc71dc12..e129fdf448cba 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/get/GetSettingsResponse.java @@ -79,8 +79,8 @@ public String getSetting(String index, String setting) { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(indexToSettings, StreamOutput::writeString, (o, v) -> v.writeTo(o)); - out.writeMap(indexToDefaultSettings, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + out.writeMap(indexToSettings, StreamOutput::writeWriteable); + out.writeMap(indexToDefaultSettings, StreamOutput::writeWriteable); } private static void parseSettingsField( diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java index ad9c4904ecc86..4e31fbc2b5732 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/settings/put/UpdateSettingsRequest.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.admin.indices.settings.put; import org.elasticsearch.ElasticsearchParseException; -import 
org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -54,7 +54,7 @@ public UpdateSettingsRequest(StreamInput in) throws IOException { indicesOptions = IndicesOptions.readIndicesOptions(in); settings = readSettingsFromStream(in); preserveExisting = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_12_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_12_0)) { origin = in.readString(); } } @@ -183,7 +183,7 @@ public void writeTo(StreamOutput out) throws IOException { indicesOptions.writeIndicesOptions(out); settings.writeTo(out); out.writeBoolean(preserveExisting); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_12_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_12_0)) { out.writeString(origin); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java index 20f6430c91b70..a4a8a475ae8b7 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresRequest.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.action.admin.indices.shards; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; @@ -54,7 +54,7 @@ public IndicesShardStoresRequest(StreamInput in) throws IOException { statuses.add(ClusterHealthStatus.readFrom(in)); } indicesOptions = IndicesOptions.readIndicesOptions(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { maxConcurrentShardRequests = in.readVInt(); } else { // earlier versions had unlimited concurrency @@ -68,7 +68,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringArrayNullable(indices); out.writeCollection(statuses, (o, v) -> o.writeByte(v.value())); indicesOptions.writeIndicesOptions(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeVInt(maxConcurrentShardRequests); } else if (maxConcurrentShardRequests != DEFAULT_MAX_CONCURRENT_SHARD_REQUESTS) { throw new IllegalArgumentException( diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java index 8e18e4c689133..e9b27629beebf 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/shards/IndicesShardStoresResponse.java @@ -245,8 +245,10 @@ public IndicesShardStoresResponse(Map>> s public IndicesShardStoresResponse(StreamInput in) throws IOException { super(in); - storeStatuses = in.readImmutableMap(i -> i.readImmutableMap(StreamInput::readInt, j -> j.readImmutableList(StoreStatus::new))); - failures = in.readImmutableList(Failure::readFailure); + 
storeStatuses = in.readImmutableMap( + i -> i.readImmutableMap(StreamInput::readInt, j -> j.readCollectionAsImmutableList(StoreStatus::new)) + ); + failures = in.readCollectionAsImmutableList(Failure::readFailure); } /** @@ -267,12 +269,8 @@ public List getFailures() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap( - storeStatuses, - StreamOutput::writeString, - (o, v) -> o.writeMap(v, StreamOutput::writeInt, StreamOutput::writeCollection) - ); - out.writeList(failures); + out.writeMap(storeStatuses, (o, v) -> o.writeMap(v, StreamOutput::writeInt, StreamOutput::writeCollection)); + out.writeCollection(failures); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java index 5af82874652a4..76500964be750 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStats.java @@ -10,6 +10,7 @@ import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -46,8 +47,8 @@ public class CommonStats implements Writeable, ToXContentFragment { - private static final TransportVersion VERSION_SUPPORTING_NODE_MAPPINGS = TransportVersion.V_8_5_0; - private static final TransportVersion VERSION_SUPPORTING_DENSE_VECTOR_STATS = TransportVersion.V_8_500_058; + private static final TransportVersion VERSION_SUPPORTING_NODE_MAPPINGS = TransportVersions.V_8_5_0; + private static final TransportVersion VERSION_SUPPORTING_DENSE_VECTOR_STATS = TransportVersions.V_8_500_058; @Nullable public DocsStats docs; @@ -208,7 +209,7 @@ public CommonStats(StreamInput in) throws IOException { translog = in.readOptionalWriteable(TranslogStats::new); requestCache = in.readOptionalWriteable(RequestCacheStats::new); recoveryStats = in.readOptionalWriteable(RecoveryStats::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { bulk = in.readOptionalWriteable(BulkStats::new); } shards = in.readOptionalWriteable(ShardCountStats::new); @@ -238,7 +239,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(translog); out.writeOptionalWriteable(requestCache); out.writeOptionalWriteable(recoveryStats); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { out.writeOptionalWriteable(bulk); } out.writeOptionalWriteable(shards); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java index 5fba3e737d7ba..ae4f20e420fca 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/CommonStatsFlags.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.stats; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; @@ -55,7 +55,7 @@ public CommonStatsFlags(StreamInput in) throws IOException { flags.add(flag); } } - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { in.readStringArray(); } groups = in.readStringArray(); @@ -73,7 +73,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeLong(longFlags); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeStringArrayNullable(Strings.EMPTY_ARRAY); } out.writeStringArrayNullable(groups); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsResponse.java index 3b5f919816708..5a16ecf976354 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/FieldUsageStatsResponse.java @@ -35,13 +35,13 @@ public class FieldUsageStatsResponse extends ChunkedBroadcastResponse { FieldUsageStatsResponse(StreamInput in) throws IOException { super(in); - stats = in.readMap(i -> i.readList(FieldUsageShardResponse::new)); + stats = in.readMap(i -> i.readCollectionAsList(FieldUsageShardResponse::new)); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeMap(stats, StreamOutput::writeString, StreamOutput::writeList); + out.writeMap(stats, StreamOutput::writeCollection); } public Map> getStats() { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java index 8e8b5aaf13c45..70959babf312e 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/IndicesStatsResponse.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.stats; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ClusterStatsLevel; import org.elasticsearch.action.admin.indices.stats.IndexStats.IndexStatsBuilder; import org.elasticsearch.action.support.DefaultShardOperationFailedException; @@ -51,7 +51,7 @@ public class IndicesStatsResponse extends ChunkedBroadcastResponse { IndicesStatsResponse(StreamInput in) throws IOException { super(in); shards = in.readArray(ShardStats::new, ShardStats[]::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { indexHealthMap = in.readMap(ClusterHealthStatus::readFrom); indexStateMap = in.readMap(IndexMetadata.State::readFrom); } else { @@ -171,9 +171,9 @@ public CommonStats getPrimaries() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeArray(shards); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { - out.writeMap(indexHealthMap, StreamOutput::writeString, (o, s) -> s.writeTo(o)); - out.writeMap(indexStateMap, StreamOutput::writeString, (o, s) -> s.writeTo(o)); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { + out.writeMap(indexHealthMap, StreamOutput::writeWriteable); + out.writeMap(indexStateMap, 
StreamOutput::writeWriteable); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java index 3f8b005ca13e5..f90dc894f1b57 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/stats/ShardStats.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.admin.indices.stats; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -27,7 +28,7 @@ public class ShardStats implements Writeable, ToXContentFragment { - private static final TransportVersion DEDUPLICATE_SHARD_PATH_VERSION = TransportVersion.V_8_4_0; + private static final TransportVersion DEDUPLICATE_SHARD_PATH_VERSION = TransportVersions.V_8_4_0; private final ShardRouting shardRouting; private final CommonStats commonStats; @@ -60,7 +61,7 @@ public ShardStats(StreamInput in) throws IOException { isCustomDataPath = in.readBoolean(); seqNoStats = in.readOptionalWriteable(SeqNoStats::new); retentionLeaseStats = in.readOptionalWriteable(RetentionLeaseStats::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { isSearchIdle = in.readBoolean(); searchIdleTime = in.readVLong(); } else { @@ -214,7 +215,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isCustomDataPath); out.writeOptionalWriteable(seqNoStats); out.writeOptionalWriteable(retentionLeaseStats); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeBoolean(isSearchIdle); out.writeVLong(searchIdleTime); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java index 3b345e9caa658..ae73904a8447b 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComponentTemplateAction.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.template.get; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; @@ -57,7 +57,7 @@ public Request(String name) { public Request(StreamInput in) throws IOException { super(in); name = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { includeDefaults = in.readBoolean(); } else { includeDefaults = false; @@ -68,7 +68,7 @@ public Request(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalString(name); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeBoolean(includeDefaults); } } @@ 
-121,7 +121,7 @@ public static class Response extends ActionResponse implements ToXContentObject public Response(StreamInput in) throws IOException { super(in); componentTemplates = in.readMap(ComponentTemplate::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { rolloverConfiguration = in.readOptionalWriteable(RolloverConfiguration::new); } else { rolloverConfiguration = null; @@ -148,8 +148,8 @@ public RolloverConfiguration getRolloverConfiguration() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(componentTemplates, StreamOutput::writeString, (o, v) -> v.writeTo(o)); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + out.writeMap(componentTemplates, StreamOutput::writeWriteable); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeOptionalWriteable(rolloverConfiguration); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java index 2cbfda8c9b921..f2c041c2c71bc 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetComposableIndexTemplateAction.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.template.get; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; @@ -58,7 +58,7 @@ public Request(@Nullable String name) { public Request(StreamInput in) throws IOException { super(in); name = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { includeDefaults = in.readBoolean(); } else { includeDefaults = false; @@ -69,7 +69,7 @@ public Request(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalString(name); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeBoolean(includeDefaults); } } @@ -123,7 +123,7 @@ public static class Response extends ActionResponse implements ToXContentObject public Response(StreamInput in) throws IOException { super(in); indexTemplates = in.readMap(ComposableIndexTemplate::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { rolloverConfiguration = in.readOptionalWriteable(RolloverConfiguration::new); } else { rolloverConfiguration = null; @@ -146,8 +146,8 @@ public Map indexTemplates() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(indexTemplates, StreamOutput::writeString, (o, v) -> v.writeTo(o)); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + out.writeMap(indexTemplates, StreamOutput::writeWriteable); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeOptionalWriteable(rolloverConfiguration); } } diff --git 
a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java index 62c9af29c9473..9c34dbac79357 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/get/GetIndexTemplatesResponse.java @@ -29,7 +29,7 @@ public class GetIndexTemplatesResponse extends ActionResponse implements ToXCont public GetIndexTemplatesResponse(StreamInput in) throws IOException { super(in); - indexTemplates = in.readList(IndexTemplateMetadata::readFrom); + indexTemplates = in.readCollectionAsList(IndexTemplateMetadata::readFrom); } public GetIndexTemplatesResponse(List indexTemplates) { diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java index 08097d5002db8..c34e8d83e8b80 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.template.post; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; import org.elasticsearch.action.support.master.MasterNodeReadRequest; @@ -40,7 +40,7 @@ public SimulateIndexTemplateRequest(StreamInput in) throws IOException { super(in); indexName = in.readString(); indexTemplateRequest = in.readOptionalWriteable(PutComposableIndexTemplateAction.Request::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { includeDefaults = in.readBoolean(); } } @@ -50,7 +50,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(indexName); out.writeOptionalWriteable(indexTemplateRequest); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeBoolean(includeDefaults); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java index 9ccafee1ba56e..b7cc8564be062 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateIndexTemplateResponse.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.template.post; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; import org.elasticsearch.cluster.metadata.Template; @@ -68,12 +68,12 @@ public SimulateIndexTemplateResponse(StreamInput in) throws IOException { overlappingTemplates = Maps.newMapWithExpectedSize(overlappingTemplatesCount); 
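// --- editorial aside, not part of the patch ---------------------------------
// Two mechanical migrations run through this whole section: (1) version
// constants move from TransportVersion to the new TransportVersions holder
// class, and the V_8_500_010 gates are bumped to V_8_500_020; (2) the
// StreamInput/StreamOutput collection helpers are renamed (readList ->
// readCollectionAsList, readStringList -> readStringCollectionAsList,
// readSet -> readCollectionAsSet, writeList -> writeCollection), and
// writeMap for String-keyed maps drops the explicit key writer, taking only
// a value writer such as StreamOutput::writeWriteable. A minimal sketch of
// the new read/write shape, using a hypothetical Writeable payload class
// (ExamplePayload is not from this diff; it compiles against the server module):
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import java.io.IOException;
import java.util.List;
import java.util.Map;

record ExamplePayload(String value) implements Writeable {
    ExamplePayload(StreamInput in) throws IOException {
        this(in.readString());
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(value);
    }
}

class ExampleSerialization {
    static void roundTrip(StreamInput in, StreamOutput out) throws IOException {
        List<ExamplePayload> items = in.readCollectionAsList(ExamplePayload::new);
        Map<String, ExamplePayload> byName = in.readMap(ExamplePayload::new);
        out.writeCollection(items);                          // was: out.writeList(items)
        out.writeMap(byName, StreamOutput::writeWriteable);  // String key writer now implied
    }
}
// --- end aside ---------------------------------------------------------------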
for (int i = 0; i < overlappingTemplatesCount; i++) { String templateName = in.readString(); - overlappingTemplates.put(templateName, in.readStringList()); + overlappingTemplates.put(templateName, in.readStringCollectionAsList()); } } else { this.overlappingTemplates = null; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { rolloverConfiguration = in.readOptionalWriteable(RolloverConfiguration::new); } } @@ -91,7 +91,7 @@ public void writeTo(StreamOutput out) throws IOException { } else { out.writeBoolean(false); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeOptionalWriteable(rolloverConfiguration); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java index 1470042091169..5ecf7b4003ca8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/post/SimulateTemplateAction.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.template.post; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ValidateActions; @@ -63,7 +63,7 @@ public Request(StreamInput in) throws IOException { super(in); templateName = in.readOptionalString(); indexTemplateRequest = in.readOptionalWriteable(PutComposableIndexTemplateAction.Request::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { includeDefaults = in.readBoolean(); } } @@ -73,7 +73,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalString(templateName); out.writeOptionalWriteable(indexTemplateRequest); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeBoolean(includeDefaults); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java index b2fb169b1f629..10c9a5e7205b0 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/template/put/PutIndexTemplateRequest.java @@ -9,7 +9,7 @@ import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.admin.indices.alias.Alias; @@ -72,11 +72,11 @@ public PutIndexTemplateRequest(StreamInput in) throws IOException { super(in); cause = in.readString(); name = in.readString(); - indexPatterns = in.readStringList(); + indexPatterns = in.readStringCollectionAsList(); order = in.readInt(); create = 
in.readBoolean(); settings = readSettingsFromStream(in); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { int size = in.readVInt(); for (int i = 0; i < size; i++) { in.readString(); // type - cannot assert on _doc because 7x allows arbitrary type names @@ -446,7 +446,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(order); out.writeBoolean(create); settings.writeTo(out); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeVInt(mappings == null ? 0 : 1); if (mappings != null) { out.writeString(MapperService.SINGLE_MAPPING_NAME); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java index e5a399aed85fe..e2ffd97addb42 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ShardValidateQueryRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.validate.query; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.support.broadcast.BroadcastShardRequest; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -33,7 +33,7 @@ public class ShardValidateQueryRequest extends BroadcastShardRequest { public ShardValidateQueryRequest(StreamInput in) throws IOException { super(in); query = in.readNamedWriteable(QueryBuilder.class); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { int typesSize = in.readVInt(); if (typesSize > 0) { for (int i = 0; i < typesSize; i++) { @@ -80,7 +80,7 @@ public long nowInMillis() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeNamedWriteable(query); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeVInt(0); // no types to filter } filteringAliases.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java index 6747b76f2aec0..2564f5eb13dc6 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.admin.indices.validate.query; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.IndicesOptions; @@ -48,7 +48,7 @@ public ValidateQueryRequest() { public ValidateQueryRequest(StreamInput in) throws IOException { super(in); query = in.readNamedWriteable(QueryBuilder.class); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { int typesSize = in.readVInt(); 
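// --- editorial aside, not part of the patch ---------------------------------
// This file and several others above keep a pre-8.0 compatibility shim: mapping
// types were removed in 8.0, so when talking to a 7.x node the reader consumes
// and discards the type names and the writer emits an empty placeholder. A
// minimal sketch of that shim with a hypothetical request class
// (ExampleTypedRequest is not from this diff):
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;

class ExampleTypedRequest {
    ExampleTypedRequest(StreamInput in) throws IOException {
        if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) {
            int typesSize = in.readVInt();
            for (int i = 0; i < typesSize; i++) {
                in.readString(); // discard: types no longer exist in 8.x
            }
        }
    }

    void writeTo(StreamOutput out) throws IOException {
        if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) {
            out.writeVInt(0); // tell 7.x peers there are no types to filter
        }
    }
}
// --- end aside ---------------------------------------------------------------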
if (typesSize > 0) { for (int i = 0; i < typesSize; i++) { @@ -137,7 +137,7 @@ public boolean allShards() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeNamedWriteable(query); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeVInt(0); // no types to filter } out.writeBoolean(explain); diff --git a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java index 5fbc4ab6b3ff9..6ec0be33e3766 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/indices/validate/query/ValidateQueryResponse.java @@ -65,7 +65,7 @@ public class ValidateQueryResponse extends BroadcastResponse { ValidateQueryResponse(StreamInput in) throws IOException { super(in); valid = in.readBoolean(); - queryExplanations = in.readList(QueryExplanation::new); + queryExplanations = in.readCollectionAsList(QueryExplanation::new); } ValidateQueryResponse( diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkAction.java index 94f0b94d554b0..70b010b6ca88d 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkAction.java @@ -25,8 +25,4 @@ private BulkAction() { super(NAME, BulkResponse::new); } - @Override - public TransportRequestOptions transportOptions() { - return TRANSPORT_REQUEST_OPTIONS; - } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java index 4ee3f5d238039..425461d1f4ba1 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemRequest.java @@ -60,7 +60,8 @@ public String index() { return request.indices()[0]; } - BulkItemResponse getPrimaryResponse() { + // public for tests + public BulkItemResponse getPrimaryResponse() { return primaryResponse; } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java index b5894d322b90e..e0b15558b860d 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkItemResponse.java @@ -10,7 +10,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.DocWriteRequest.OpType; import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteResponse; @@ -237,7 +237,7 @@ private Failure(String index, String id, Exception cause, RestStatus status, lon */ public Failure(StreamInput in) throws IOException { index = in.readString(); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { in.readString(); // can't make an assertion about type names here because too many tests still set their own // types bypassing various checks @@ -253,7 +253,7 @@ public Failure(StreamInput in) 
throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeOptionalString(id); diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java index 8a3e957afc658..de874bae56c1e 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor.java @@ -16,12 +16,13 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.RestApiVersion; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; +import org.elasticsearch.threadpool.ScheduledExecutorServiceScheduler; import org.elasticsearch.threadpool.Scheduler; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.XContentType; import java.io.Closeable; @@ -221,9 +222,7 @@ public static Builder builder(BiConsumer Scheduler.wrapAsScheduledCancellable( - scheduledThreadPoolExecutor.schedule(command, delay.millis(), TimeUnit.MILLISECONDS) - ); + return new ScheduledExecutorServiceScheduler(scheduledThreadPoolExecutor); } private final int bulkActions; @@ -465,7 +464,7 @@ public boolean isCancelled() { } }; } - return scheduler.scheduleWithFixedDelay(new Flush(), flushInterval, ThreadPool.Names.GENERIC); + return scheduler.scheduleWithFixedDelay(new Flush(), flushInterval, EsExecutors.DIRECT_EXECUTOR_SERVICE); } // needs to be executed under a lock diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor2.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor2.java index ddc7a8c83496e..d1235ba317784 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor2.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkProcessor2.java @@ -429,7 +429,7 @@ private void scheduleFlushTask() { } cancellableFlushTask = null; } - }, flushInterval, ThreadPool.Names.GENERIC); + }, flushInterval, threadPool.generic()); } } } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java index ad5dd21962d90..9c3a075629bce 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/BulkRequest.java @@ -83,7 +83,7 @@ public BulkRequest() {} public BulkRequest(StreamInput in) throws IOException { super(in); waitForActiveShards = ActiveShardCount.readFrom(in); - requests.addAll(in.readList(i -> DocWriteRequest.readDocumentRequest(null, i))); + requests.addAll(in.readCollectionAsList(i -> DocWriteRequest.readDocumentRequest(null, i))); refreshPolicy = RefreshPolicy.readFrom(in); timeout = in.readTimeValue(); } diff --git a/server/src/main/java/org/elasticsearch/action/bulk/Retry.java b/server/src/main/java/org/elasticsearch/action/bulk/Retry.java index ea94516e2b698..6503c207e8290 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/Retry.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/Retry.java @@ -15,10 
+15,10 @@ import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.index.IndexRequest; import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.Scheduler; -import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayList; import java.util.Iterator; @@ -136,7 +136,7 @@ private void retry(BulkRequest bulkRequestForRetry) { assert backoff.hasNext(); TimeValue next = backoff.next(); logger.trace("Retry of bulk request scheduled in {} ms.", next.millis()); - retryCancellable = scheduler.schedule(() -> this.execute(bulkRequestForRetry), next, ThreadPool.Names.SAME); + retryCancellable = scheduler.schedule(() -> this.execute(bulkRequestForRetry), next, EsExecutors.DIRECT_EXECUTOR_SERVICE); } private BulkRequest createBulkRequestForRetry(BulkResponse bulkItemResponses) { diff --git a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java index 27dae7f6272a9..9266ee3ee0b68 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/TransportShardBulkAction.java @@ -117,9 +117,14 @@ public TransportShardBulkAction( this.postWriteAction = WriteAckDelay.create(settings, threadPool); } + private static final TransportRequestOptions TRANSPORT_REQUEST_OPTIONS = TransportRequestOptions.of( + null, + TransportRequestOptions.Type.BULK + ); + @Override protected TransportRequestOptions transportOptions() { - return BulkAction.INSTANCE.transportOptions(); + return TRANSPORT_REQUEST_OPTIONS; } @Override diff --git a/server/src/main/java/org/elasticsearch/action/bulk/WriteAckDelay.java b/server/src/main/java/org/elasticsearch/action/bulk/WriteAckDelay.java index 49fff2497506d..9a4211618982a 100644 --- a/server/src/main/java/org/elasticsearch/action/bulk/WriteAckDelay.java +++ b/server/src/main/java/org/elasticsearch/action/bulk/WriteAckDelay.java @@ -51,7 +51,7 @@ public WriteAckDelay(long writeDelayIntervalNanos, long writeDelayRandomnessBoun this.threadPool.scheduleWithFixedDelay( new ScheduleTask(), TimeValue.timeValueNanos(writeDelayIntervalNanos), - ThreadPool.Names.GENERIC + this.threadPool.generic() ); } @@ -80,7 +80,7 @@ public void run() { writeDelayInterval, randomDelay ); - threadPool.schedule(new CompletionTask(tasks), randomDelay, ThreadPool.Names.GENERIC); + threadPool.schedule(new CompletionTask(tasks), randomDelay, threadPool.generic()); } } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java index f4eae07cc291d..68a4e0d0b04c9 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/CreateDataStreamAction.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.action.datastreams; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.IndicesRequest; @@ -66,7 +66,7 @@ public ActionRequestValidationException validate() { public Request(StreamInput in) throws IOException { super(in); 
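// --- editorial aside, not part of the patch ---------------------------------
// The BulkProcessor/BulkProcessor2/Retry/WriteAckDelay hunks just above migrate
// scheduling from thread-pool *names* to Executor instances: work that named
// ThreadPool.Names.GENERIC now passes threadPool.generic(), same-thread
// execution (formerly ThreadPool.Names.SAME) becomes
// EsExecutors.DIRECT_EXECUTOR_SERVICE, and BulkProcessor wraps its
// ScheduledThreadPoolExecutor in ScheduledExecutorServiceScheduler instead of
// wrapping each scheduled future by hand. A minimal sketch of the new call
// shape, with a hypothetical no-op task (SchedulingSketch is not from this diff):
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.threadpool.ThreadPool;

class SchedulingSketch {
    static void scheduleExamples(ThreadPool threadPool) {
        Runnable task = () -> {};
        // run on the generic pool: pass the executor object, not the pool name
        threadPool.schedule(task, TimeValue.timeValueSeconds(30), threadPool.generic());
        // DIRECT_EXECUTOR_SERVICE runs the task directly on the thread that fires it
        threadPool.schedule(task, TimeValue.timeValueSeconds(30), EsExecutors.DIRECT_EXECUTOR_SERVICE);
        // periodic work follows the same pattern
        threadPool.scheduleWithFixedDelay(task, TimeValue.timeValueSeconds(30), threadPool.generic());
    }
}
// --- end aside ---------------------------------------------------------------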
this.name = in.readString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { this.startTime = in.readVLong(); } else { this.startTime = System.currentTimeMillis(); @@ -77,7 +77,7 @@ public Request(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(name); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { out.writeVLong(startTime); } } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java index ce9b9fe19ab0a..aa69ede54dea1 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/GetDataStreamAction.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.action.datastreams; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; @@ -71,7 +71,7 @@ public Request(StreamInput in) throws IOException { super(in); this.names = in.readOptionalStringArray(); this.indicesOptions = IndicesOptions.readIndicesOptions(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { this.includeDefaults = in.readBoolean(); } else { this.includeDefaults = false; @@ -83,7 +83,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalStringArray(names); indicesOptions.writeIndicesOptions(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeBoolean(includeDefaults); } } @@ -188,7 +188,7 @@ public DataStreamInfo( ClusterHealthStatus.readFrom(in), in.readOptionalString(), in.readOptionalString(), - in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0) ? in.readOptionalWriteable(TimeSeries::new) : null + in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0) ? in.readOptionalWriteable(TimeSeries::new) : null ); } @@ -221,7 +221,7 @@ public void writeTo(StreamOutput out) throws IOException { dataStreamStatus.writeTo(out); out.writeOptionalString(indexTemplate); out.writeOptionalString(ilmPolicyName); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { out.writeOptionalWriteable(timeSeries); } } @@ -301,7 +301,7 @@ public int hashCode() { public record TimeSeries(List> temporalRanges) implements Writeable { TimeSeries(StreamInput in) throws IOException { - this(in.readList(in1 -> new Tuple<>(in1.readInstant(), in1.readInstant()))); + this(in.readCollectionAsList(in1 -> new Tuple<>(in1.readInstant(), in1.readInstant()))); } @Override @@ -341,8 +341,8 @@ public Response(List dataStreams, @Nullable RolloverConfiguratio public Response(StreamInput in) throws IOException { this( - in.readList(DataStreamInfo::new), - in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010) + in.readCollectionAsList(DataStreamInfo::new), + in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) ? 
in.readOptionalWriteable(RolloverConfiguration::new) : null ); @@ -359,8 +359,8 @@ public RolloverConfiguration getRolloverConfiguration() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(dataStreams); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + out.writeCollection(dataStreams); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeOptionalWriteable(rolloverConfiguration); } } diff --git a/server/src/main/java/org/elasticsearch/action/datastreams/ModifyDataStreamsAction.java b/server/src/main/java/org/elasticsearch/action/datastreams/ModifyDataStreamsAction.java index 58ab8bad3c956..d2a04305cb2ba 100644 --- a/server/src/main/java/org/elasticsearch/action/datastreams/ModifyDataStreamsAction.java +++ b/server/src/main/java/org/elasticsearch/action/datastreams/ModifyDataStreamsAction.java @@ -60,13 +60,13 @@ public static final class Request extends AcknowledgedRequest implement public Request(StreamInput in) throws IOException { super(in); - actions = in.readList(DataStreamAction::new); + actions = in.readCollectionAsList(DataStreamAction::new); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeList(actions); + out.writeCollection(actions); } public Request(List actions) { diff --git a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java index f151ae6eca2d7..70f0a9a12e02e 100644 --- a/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java +++ b/server/src/main/java/org/elasticsearch/action/delete/DeleteRequest.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.delete; import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; import org.elasticsearch.action.DocWriteRequest; @@ -62,7 +62,7 @@ public DeleteRequest(StreamInput in) throws IOException { public DeleteRequest(@Nullable ShardId shardId, StreamInput in) throws IOException { super(shardId, in); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { String type = in.readString(); assert MapperService.SINGLE_MAPPING_NAME.equals(type) : "Expected [_doc] but received [" + type + "]"; } @@ -254,7 +254,7 @@ public void writeThin(StreamOutput out) throws IOException { } private void writeBody(StreamOutput out) throws IOException { - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); diff --git a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java index 4911053b0f2a0..c2008823b0523 100644 --- a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java +++ b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleAction.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.action.downsample; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.action.ActionRequestValidationException; import 
org.elasticsearch.action.ActionType; @@ -64,7 +64,7 @@ public Request(StreamInput in) throws IOException { super(in); sourceIndex = in.readString(); targetIndex = in.readString(); - waitTimeout = in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_054) + waitTimeout = in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_054) ? TimeValue.parseTimeValue(in.readString(), "timeout") : DEFAULT_WAIT_TIMEOUT; downsampleConfig = new DownsampleConfig(in); @@ -91,7 +91,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(sourceIndex); out.writeString(targetIndex); out.writeString( - out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_054) + out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_054) ? waitTimeout.getStringRep() : DEFAULT_WAIT_TIMEOUT.getStringRep() ); diff --git a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleConfig.java b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleConfig.java index f47ada5ab9285..432dc69caf634 100644 --- a/server/src/main/java/org/elasticsearch/action/downsample/DownsampleConfig.java +++ b/server/src/main/java/org/elasticsearch/action/downsample/DownsampleConfig.java @@ -242,7 +242,7 @@ public static Rounding.Prepared createRounding(final String expr, final String t * prefix-fixedInterval-baseIndexName * * Note that this looks for the base index name of the provided index metadata via the - * {@link IndexMetadata#INDEX_DOWNSAMPLE_SOURCE_NAME_KEY} setting. This means that in case + * {@link IndexMetadata#INDEX_DOWNSAMPLE_ORIGIN_NAME_KEY} setting. This means that in case * the provided index was already downsampled, we'll use the original source index (of the * current provided downsample index) as the base index name. */ @@ -251,10 +251,13 @@ public static String generateDownsampleIndexName( IndexMetadata sourceIndexMetadata, DateHistogramInterval fixedInterval ) { - String downsampleSourceName = sourceIndexMetadata.getSettings().get(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME_KEY); + String downsampleOriginName = IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_NAME.get(sourceIndexMetadata.getSettings()); String sourceIndexName; - if (downsampleSourceName != null) { - sourceIndexName = downsampleSourceName; + if (Strings.hasText(downsampleOriginName)) { + sourceIndexName = downsampleOriginName; + } else if (Strings.hasText(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.get(sourceIndexMetadata.getSettings()))) { + // bwc for downsample indices created pre 8.10 which didn't configure the origin + sourceIndexName = IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.get(sourceIndexMetadata.getSettings()); } else { sourceIndexName = sourceIndexMetadata.getIndex().getName(); } diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java index 685eb0b8a1995..d1e48574fe226 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.explain; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ValidateActions; import org.elasticsearch.action.support.single.shard.SingleShardRequest; @@ -54,7 +54,7 @@ public ExplainRequest(String index, String id) { ExplainRequest(StreamInput in) throws IOException { super(in); - 
if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { String type = in.readString(); assert MapperService.SINGLE_MAPPING_NAME.equals(type); } @@ -160,7 +160,7 @@ public ActionRequestValidationException validate() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); diff --git a/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java b/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java index 97c56069fa762..51e7509863796 100644 --- a/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java +++ b/server/src/main/java/org/elasticsearch/action/explain/ExplainResponse.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.explain; import org.apache.lucene.search.Explanation; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -69,7 +69,7 @@ public ExplainResponse(String index, String id, boolean exists, Explanation expl public ExplainResponse(StreamInput in) throws IOException { super(in); index = in.readString(); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { in.readString(); } id = in.readString(); @@ -118,7 +118,7 @@ public RestStatus status() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java index e2ae9fd17bb23..095a5ec8f5594 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilities.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -35,7 +35,6 @@ import java.util.function.Function; import java.util.function.Predicate; import java.util.stream.Collectors; -import java.util.stream.Stream; import static org.elasticsearch.index.mapper.TimeSeriesParams.TIME_SERIES_DIMENSION_PARAM; import static org.elasticsearch.index.mapper.TimeSeriesParams.TIME_SERIES_METRIC_PARAM; @@ -227,7 +226,7 @@ public FieldCapabilities( this.isMetadataField = in.readBoolean(); this.isSearchable = in.readBoolean(); this.isAggregatable = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { this.isDimension = in.readBoolean(); this.metricType = in.readOptionalEnum(TimeSeriesParams.MetricType.class); } else { @@ -237,14 +236,14 @@ public FieldCapabilities( this.indices = 
in.readOptionalStringArray(); this.nonSearchableIndices = in.readOptionalStringArray(); this.nonAggregatableIndices = in.readOptionalStringArray(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { this.nonDimensionIndices = in.readOptionalStringArray(); this.metricConflictsIndices = in.readOptionalStringArray(); } else { this.nonDimensionIndices = null; this.metricConflictsIndices = null; } - meta = in.readMap(i -> i.readSet(StreamInput::readString)); + meta = in.readMap(i -> i.readCollectionAsSet(StreamInput::readString)); } @Override @@ -254,18 +253,18 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isMetadataField); out.writeBoolean(isSearchable); out.writeBoolean(isAggregatable); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { out.writeBoolean(isDimension); out.writeOptionalEnum(metricType); } out.writeOptionalStringArray(indices); out.writeOptionalStringArray(nonSearchableIndices); out.writeOptionalStringArray(nonAggregatableIndices); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { out.writeOptionalStringArray(nonDimensionIndices); out.writeOptionalStringArray(metricConflictsIndices); } - out.writeMap(meta, StreamOutput::writeString, (o, set) -> o.writeCollection(set, StreamOutput::writeString)); + out.writeMap(meta, StreamOutput::writeStringCollection); } @Override @@ -301,9 +300,9 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws List>> entries = new ArrayList<>(meta.entrySet()); entries.sort(Map.Entry.comparingByKey()); // provide predictable order for (Map.Entry> entry : entries) { - List values = new ArrayList<>(entry.getValue()); - values.sort(String::compareTo); // provide predictable order - builder.stringListField(entry.getKey(), values); + String[] values = entry.getValue().toArray(Strings.EMPTY_ARRAY); + Arrays.sort(values, String::compareTo); // provide predictable order + builder.array(entry.getKey(), values); } builder.endObject(); } @@ -545,8 +544,13 @@ void add( } } - Stream getIndices() { - return indicesList.stream().flatMap(c -> Arrays.stream(c.indices)); + void getIndices(Set into) { + for (int i = 0; i < indicesList.size(); i++) { + IndexCaps indexCaps = indicesList.get(i); + for (String element : indexCaps.indices) { + into.add(element); + } + } } private String[] filterIndices(int length, Predicate pred) { diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFailure.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFailure.java index 4d7c637c90e92..e47de4bebc1bb 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFailure.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesFailure.java @@ -38,7 +38,7 @@ public FieldCapabilitiesFailure(String[] indices, Exception exception) { } public FieldCapabilitiesFailure(StreamInput in) throws IOException { - this.indices = in.readStringList(); + this.indices = in.readStringCollectionAsList(); this.exception = in.readException(); } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java index 
7979811561ee3..aac322c4a1de7 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesIndexResponse.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.fieldcaps; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -23,7 +24,7 @@ import java.util.stream.Stream; final class FieldCapabilitiesIndexResponse implements Writeable { - private static final TransportVersion MAPPING_HASH_VERSION = TransportVersion.V_8_2_0; + private static final TransportVersion MAPPING_HASH_VERSION = TransportVersions.V_8_2_0; private final String indexName; @Nullable @@ -60,7 +61,7 @@ final class FieldCapabilitiesIndexResponse implements Writeable { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(indexName); - out.writeMap(responseMap, StreamOutput::writeString, (valueOut, fc) -> fc.writeTo(valueOut)); + out.writeMap(responseMap, StreamOutput::writeWriteable); out.writeBoolean(canMatch); if (out.getTransportVersion().onOrAfter(MAPPING_HASH_VERSION)) { out.writeOptionalString(indexMappingHash); @@ -71,14 +72,14 @@ private record GroupByMappingHash(List indices, String indexMappingHash, implements Writeable { GroupByMappingHash(StreamInput in) throws IOException { - this(in.readStringList(), in.readString(), in.readMap(IndexFieldCapabilities::new)); + this(in.readStringCollectionAsList(), in.readString(), in.readMap(IndexFieldCapabilities::new)); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(indices); out.writeString(indexMappingHash); - out.writeMap(responseMap, StreamOutput::writeString, (valueOut, fc) -> fc.writeTo(valueOut)); + out.writeMap(responseMap, StreamOutput::writeWriteable); } Stream getResponses() { @@ -88,10 +89,10 @@ Stream getResponses() { static List readList(StreamInput input) throws IOException { if (input.getTransportVersion().before(MAPPING_HASH_VERSION)) { - return input.readList(FieldCapabilitiesIndexResponse::new); + return input.readCollectionAsList(FieldCapabilitiesIndexResponse::new); } - final List ungroupedList = input.readList(FieldCapabilitiesIndexResponse::new); - final List groups = input.readList(GroupByMappingHash::new); + final List ungroupedList = input.readCollectionAsList(FieldCapabilitiesIndexResponse::new); + final List groups = input.readCollectionAsList(GroupByMappingHash::new); return Stream.concat(ungroupedList.stream(), groups.stream().flatMap(GroupByMappingHash::getResponses)).toList(); } @@ -114,8 +115,8 @@ static void writeList(StreamOutput output, List return new GroupByMappingHash(indices, indexMappingHash, responseMap); }) .toList(); - output.writeList(ungroupedResponses); - output.writeList(groupedResponses); + output.writeCollection(ungroupedResponses); + output.writeCollection(groupedResponses); } /** diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeRequest.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeRequest.java index cdb648ba2feba..ba238638efba4 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeRequest.java @@ -8,7 +8,7 @@ package 
org.elasticsearch.action.fieldcaps; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -42,9 +42,9 @@ class FieldCapabilitiesNodeRequest extends ActionRequest implements IndicesReque FieldCapabilitiesNodeRequest(StreamInput in) throws IOException { super(in); - shardIds = in.readList(ShardId::new); + shardIds = in.readCollectionAsList(ShardId::new); fields = in.readStringArray(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) { filters = in.readStringArray(); allowedTypes = in.readStringArray(); } else { @@ -122,9 +122,9 @@ public long nowInMillis() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeList(shardIds); + out.writeCollection(shardIds); out.writeStringArray(fields); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) { out.writeStringArray(filters); out.writeStringArray(allowedTypes); } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeResponse.java index 91f079cadbd99..d358e69c5b2ff 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeResponse.java @@ -39,13 +39,13 @@ class FieldCapabilitiesNodeResponse extends ActionResponse implements Writeable super(in); this.indexResponses = FieldCapabilitiesIndexResponse.readList(in); this.failures = in.readMap(ShardId::new, StreamInput::readException); - this.unmatchedShardIds = in.readSet(ShardId::new); + this.unmatchedShardIds = in.readCollectionAsSet(ShardId::new); } @Override public void writeTo(StreamOutput out) throws IOException { FieldCapabilitiesIndexResponse.writeList(out, indexResponses); - out.writeMap(failures, (o, v) -> v.writeTo(o), StreamOutput::writeException); + out.writeMap(failures, StreamOutput::writeWriteable, StreamOutput::writeException); out.writeCollection(unmatchedShardIds); } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java index 18e08b2e8bdf9..95555cfd59ab2 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -58,7 +58,7 @@ public FieldCapabilitiesRequest(StreamInput in) throws IOException { indexFilter = in.readOptionalNamedWriteable(QueryBuilder.class); nowInMillis = in.readOptionalLong(); runtimeFields = in.readMap(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) { filters = in.readStringArray(); types = in.readStringArray(); } @@ -96,7 +96,7 @@ 
public void writeTo(StreamOutput out) throws IOException { out.writeOptionalNamedWriteable(indexFilter); out.writeOptionalLong(nowInMillis); out.writeGenericMap(runtimeFields); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) { out.writeStringArray(filters); out.writeStringArray(types); } diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java index 6398ffaa713c5..84388864166dc 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponse.java @@ -78,7 +78,7 @@ public FieldCapabilitiesResponse(StreamInput in) throws IOException { indices = in.readStringArray(); this.responseMap = in.readMap(FieldCapabilitiesResponse::readField); this.indexResponses = FieldCapabilitiesIndexResponse.readList(in); - this.failures = in.readList(FieldCapabilitiesFailure::new); + this.failures = in.readCollectionAsList(FieldCapabilitiesFailure::new); } /** @@ -147,13 +147,13 @@ private static Map readField(StreamInput in) throws I @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringArray(indices); - out.writeMap(responseMap, StreamOutput::writeString, FieldCapabilitiesResponse::writeField); + out.writeMap(responseMap, FieldCapabilitiesResponse::writeField); FieldCapabilitiesIndexResponse.writeList(out, indexResponses); - out.writeList(failures); + out.writeCollection(failures); } private static void writeField(StreamOutput out, Map map) throws IOException { - out.writeMap(map, StreamOutput::writeString, (valueOut, fc) -> fc.writeTo(valueOut)); + out.writeMap(map, StreamOutput::writeWriteable); } @Override diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java index 5b9ac325d1cf0..57a9dd049d26c 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/IndexFieldCapabilities.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.fieldcaps; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -68,7 +68,7 @@ public class IndexFieldCapabilities implements Writeable { this.isMetadatafield = in.readBoolean(); this.isSearchable = in.readBoolean(); this.isAggregatable = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { this.isDimension = in.readBoolean(); this.metricType = in.readOptionalEnum(TimeSeriesParams.MetricType.class); } else { @@ -85,11 +85,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(isMetadatafield); out.writeBoolean(isSearchable); out.writeBoolean(isAggregatable); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { out.writeBoolean(isDimension); out.writeOptionalEnum(metricType); } - out.writeMap(meta, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(meta, 
StreamOutput::writeString); } public String getName() { diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/ResponseRewriter.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/ResponseRewriter.java index d7d923ec089aa..37313c435319c 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/ResponseRewriter.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/ResponseRewriter.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.fieldcaps; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import java.util.HashMap; import java.util.Map; @@ -29,7 +30,7 @@ public static Map rewriteOldResponses( String[] filters, String[] allowedTypes ) { - if (version.onOrAfter(TransportVersion.V_8_2_0)) { + if (version.onOrAfter(TransportVersions.V_8_2_0)) { return input; // nothing needs to be done } Function transformer = buildTransformer(input, filters, allowedTypes); diff --git a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java index 0c5645826219a..79b9fa5099467 100644 --- a/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java +++ b/server/src/main/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesAction.java @@ -23,9 +23,9 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.shard.ShardId; @@ -47,6 +47,7 @@ import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Set; @@ -310,69 +311,88 @@ private static FieldCapabilitiesResponse merge( } task.ensureNotCancelled(); - return new FieldCapabilitiesResponse( - indices, - buildResponseMap(indexResponsesMap, responseMapBuilder, request.includeUnmapped()), - failures - ); + Map> responseMap = Maps.newMapWithExpectedSize(responseMapBuilder.size()); + if (request.includeUnmapped()) { + collectResponseMapIncludingUnmapped(indices, responseMapBuilder, responseMap); + } else { + collectResponseMap(responseMapBuilder, responseMap); + } + return new FieldCapabilitiesResponse(indices, Collections.unmodifiableMap(responseMap), failures); } - private static Map> buildResponseMap( - Map indexResponsesMap, + private static void collectResponseMapIncludingUnmapped( + String[] indices, Map> responseMapBuilder, - boolean includeUnmapped + Map> responseMap ) { - Map> responseMap = Maps.newMapWithExpectedSize(responseMapBuilder.size()); - final var indices = indexResponsesMap.keySet(); + final Set mappedScratch = new HashSet<>(); for (Map.Entry> entry : responseMapBuilder.entrySet()) { var typeMapBuilder = entry.getValue().entrySet(); - Function unmapped = null; - if (includeUnmapped) { - // do this directly, rather than using the builder, to save creating a whole lot of objects we don't need - unmapped = getUnmappedFields( - indices, - entry.getKey(), - typeMapBuilder.stream().flatMap(t -> t.getValue().getIndices()).collect(Collectors.toSet()) - ); + // do this directly, 
rather than using the builder, to save creating a whole lot of objects we don't need + mappedScratch.clear(); + for (Map.Entry b : typeMapBuilder) { + b.getValue().getIndices(mappedScratch); } + var unmapped = getUnmappedFields(indices, entry.getKey(), mappedScratch); final int resSize = typeMapBuilder.size() + (unmapped == null ? 0 : 1); - boolean multiTypes = resSize > 1; - final Map res = Maps.newHashMapWithExpectedSize(resSize); - for (Map.Entry e : typeMapBuilder) { - res.put(e.getKey(), e.getValue().build(multiTypes)); - } + final Map res = capabilities(resSize, typeMapBuilder); if (unmapped != null) { - res.put("unmapped", unmapped.apply(multiTypes)); + res.put("unmapped", unmapped.apply(resSize > 1)); } responseMap.put(entry.getKey(), Collections.unmodifiableMap(res)); } - return Collections.unmodifiableMap(responseMap); + } + + private static void collectResponseMap( + Map> responseMapBuilder, + Map> responseMap + ) { + for (Map.Entry> entry : responseMapBuilder.entrySet()) { + var typeMapBuilder = entry.getValue().entrySet(); + responseMap.put(entry.getKey(), Collections.unmodifiableMap(capabilities(typeMapBuilder.size(), typeMapBuilder))); + } + } + + private static Map capabilities(int resSize, Set> builders) { + boolean multiTypes = resSize > 1; + final Map res = Maps.newHashMapWithExpectedSize(resSize); + for (Map.Entry e : builders) { + res.put(e.getKey(), e.getValue().build(multiTypes)); + } + return res; } @Nullable - private static Function getUnmappedFields(Set indices, String field, Set mappedIndices) { - if (mappedIndices.size() != indices.size()) { - return mt -> new FieldCapabilities( - field, - "unmapped", - false, - false, - false, - false, - null, - mt ? Sets.difference(indices, mappedIndices).toArray(Strings.EMPTY_ARRAY) : null, - null, - null, - null, - null, - Map.of() - ); + private static Function getUnmappedFields(String[] indices, String field, Set mappedIndices) { + if (mappedIndices.size() != indices.length) { + return mt -> { + final String[] diff; + if (mt) { + diff = new String[indices.length - mappedIndices.size()]; + Iterator indicesIter = Iterators.forArray(indices); + for (int i = 0; i < diff.length; i++) { + diff[i] = nextIndex(indicesIter, mappedIndices); + } + } else { + diff = null; + } + return new FieldCapabilities(field, "unmapped", false, false, false, false, null, diff, null, null, null, null, Map.of()); + }; } return null; } + private static String nextIndex(Iterator iter, Set filtered) { + while (true) { + String index = iter.next(); + if (filtered.contains(index) == false) { + return index; + } + } + } + private static void innerMerge( String[] indices, Map> responseMapBuilder, diff --git a/server/src/main/java/org/elasticsearch/action/get/GetRequest.java b/server/src/main/java/org/elasticsearch/action/get/GetRequest.java index 2654a69b12728..f15bb2ef448df 100644 --- a/server/src/main/java/org/elasticsearch/action/get/GetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/GetRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.get; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.RealtimeRequest; import org.elasticsearch.action.ValidateActions; @@ -66,7 +66,7 @@ public GetRequest() {} GetRequest(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { 
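// pre-8.0 peers still send a mapping type at this position; read it only to discard it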
in.readString(); } id = in.readString(); @@ -79,7 +79,7 @@ public GetRequest() {} this.versionType = VersionType.fromValue(in.readByte()); this.version = in.readLong(); fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::readFrom); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { forceSyntheticSource = in.readBoolean(); } else { forceSyntheticSource = false; @@ -89,7 +89,7 @@ public GetRequest() {} @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); @@ -102,7 +102,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeByte(versionType.getValue()); out.writeLong(version); out.writeOptionalWriteable(fetchSourceContext); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { out.writeBoolean(forceSyntheticSource); } else { if (forceSyntheticSource) { diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java index 003495c9b4801..e00edefc26289 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetRequest.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.get; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; @@ -85,7 +85,7 @@ public Item() { public Item(StreamInput in) throws IOException { index = in.readString(); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { in.readOptionalString(); } id = in.readString(); @@ -179,7 +179,7 @@ public Item fetchSourceContext(FetchSourceContext fetchSourceContext) { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); @@ -262,8 +262,8 @@ public MultiGetRequest(StreamInput in) throws IOException { preference = in.readOptionalString(); refresh = in.readBoolean(); realtime = in.readBoolean(); - items = in.readList(Item::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + items = in.readCollectionAsList(Item::new); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { forceSyntheticSource = in.readBoolean(); } else { forceSyntheticSource = false; @@ -276,8 +276,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(preference); out.writeBoolean(refresh); out.writeBoolean(realtime); - out.writeList(items); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + out.writeCollection(items); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { out.writeBoolean(forceSyntheticSource); } else { if (forceSyntheticSource) 
{ diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java index b7c68a51ed604..e91329e810397 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.get; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; @@ -57,7 +57,7 @@ public Failure(String index, String id, Exception exception) { Failure(StreamInput in) throws IOException { index = in.readString(); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { in.readOptionalString(); } id = in.readString(); @@ -88,7 +88,7 @@ public String getMessage() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); diff --git a/server/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java b/server/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java index 2f2615f64cbd1..fd0aa8ea2e232 100644 --- a/server/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java +++ b/server/src/main/java/org/elasticsearch/action/get/MultiGetShardRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.get; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.single.shard.SingleShardRequest; import org.elasticsearch.common.io.stream.StreamInput; @@ -83,7 +83,7 @@ public int hashCode() { preference = in.readOptionalString(); refresh = in.readBoolean(); realtime = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { forceSyntheticSource = in.readBoolean(); } else { forceSyntheticSource = false; @@ -103,7 +103,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(preference); out.writeBoolean(refresh); out.writeBoolean(realtime); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { out.writeBoolean(forceSyntheticSource); } else { if (forceSyntheticSource) { diff --git a/server/src/main/java/org/elasticsearch/action/get/TransportGetFromTranslogAction.java b/server/src/main/java/org/elasticsearch/action/get/TransportGetFromTranslogAction.java index c3127c9a2a05d..37e07b3ca2e07 100644 --- a/server/src/main/java/org/elasticsearch/action/get/TransportGetFromTranslogAction.java +++ b/server/src/main/java/org/elasticsearch/action/get/TransportGetFromTranslogAction.java @@ -14,8 +14,10 @@ import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.IndicesRequest; import 
org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.action.support.HandledTransportAction; +import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -41,7 +43,7 @@ public class TransportGetFromTranslogAction extends HandledTransportAction< TransportGetFromTranslogAction.Request, TransportGetFromTranslogAction.Response> { - public static final String NAME = "internal:data/read/get_from_translog"; + public static final String NAME = "indices:data/read/get_from_translog"; public static final Logger logger = LogManager.getLogger(TransportGetFromTranslogAction.class); private final IndicesService indicesService; @@ -83,7 +85,7 @@ protected void doExecute(Task task, Request request, ActionListener li }); } - public static class Request extends ActionRequest { + public static class Request extends ActionRequest implements IndicesRequest { private final GetRequest getRequest; private final ShardId shardId; @@ -123,6 +125,16 @@ public ActionRequestValidationException validate() { public String toString() { return "GetFromTranslogRequest{" + "getRequest=" + getRequest + ", shardId=" + shardId + "}"; } + + @Override + public String[] indices() { + return getRequest.indices(); + } + + @Override + public IndicesOptions indicesOptions() { + return getRequest.indicesOptions(); + } } public static class Response extends ActionResponse { diff --git a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java index f1d9c73f35e91..b1beecd980cee 100644 --- a/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java +++ b/server/src/main/java/org/elasticsearch/action/index/IndexRequest.java @@ -11,6 +11,7 @@ import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; @@ -67,7 +68,7 @@ public class IndexRequest extends ReplicatedWriteRequest implements DocWriteRequest, CompositeIndicesRequest { private static final long SHALLOW_SIZE = RamUsageEstimator.shallowSizeOfInstance(IndexRequest.class); - private static final TransportVersion PIPELINES_HAVE_RUN_FIELD_ADDED = TransportVersion.V_8_500_049; + private static final TransportVersion PIPELINES_HAVE_RUN_FIELD_ADDED = TransportVersions.V_8_500_049; /** * Max length of the source document to include into string() @@ -129,7 +130,7 @@ public IndexRequest(StreamInput in) throws IOException { public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOException { super(shardId, in); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { String type = in.readOptionalString(); assert MapperService.SINGLE_MAPPING_NAME.equals(type) : "Expected [_doc] but received [" + type + "]"; } @@ -140,10 +141,10 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio version = in.readLong(); versionType = VersionType.fromValue(in.readByte()); pipeline = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_5_0)) { + if 
(in.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) { finalPipeline = in.readOptionalString(); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_5_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) { isPipelineResolved = in.readBoolean(); } isRetry = in.readBoolean(); @@ -155,12 +156,12 @@ public IndexRequest(@Nullable ShardId shardId, StreamInput in) throws IOExceptio } ifSeqNo = in.readZLong(); ifPrimaryTerm = in.readVLong(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { requireAlias = in.readBoolean(); } else { requireAlias = false; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_13_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_13_0)) { dynamicTemplates = in.readMap(StreamInput::readString); } if (in.getTransportVersion().onOrAfter(PIPELINES_HAVE_RUN_FIELD_ADDED)) { @@ -654,7 +655,7 @@ public void reset() { } public void checkAutoIdWithOpTypeCreateSupportedByVersion(TransportVersion version) { - if (id == null && opType == OpType.CREATE && version.before(TransportVersion.V_7_5_0)) { + if (id == null && opType == OpType.CREATE && version.before(TransportVersions.V_7_5_0)) { throw new IllegalArgumentException( "optype create not supported for indexing requests without explicit id below transport version 7500099, current version " + version @@ -687,7 +688,7 @@ public void writeThin(StreamOutput out) throws IOException { } private void writeBody(StreamOutput out) throws IOException { - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME); } out.writeOptionalString(id); @@ -697,10 +698,10 @@ private void writeBody(StreamOutput out) throws IOException { out.writeLong(version); out.writeByte(versionType.getValue()); out.writeOptionalString(pipeline); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_5_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) { out.writeOptionalString(finalPipeline); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_5_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) { out.writeBoolean(isPipelineResolved); } out.writeBoolean(isRetry); @@ -713,11 +714,11 @@ private void writeBody(StreamOutput out) throws IOException { } out.writeZLong(ifSeqNo); out.writeVLong(ifPrimaryTerm); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { out.writeBoolean(requireAlias); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_13_0)) { - out.writeMap(dynamicTemplates, StreamOutput::writeString, StreamOutput::writeString); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_13_0)) { + out.writeMap(dynamicTemplates, StreamOutput::writeString); } else { if (dynamicTemplates.isEmpty() == false) { throw new IllegalArgumentException("[dynamic_templates] parameter requires all nodes on " + Version.V_7_13_0 + " or later"); diff --git a/server/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java b/server/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java index 0271afb9a9b77..d4ae5e33f3b10 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java +++ 
b/server/src/main/java/org/elasticsearch/action/ingest/SimulateProcessorResult.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.ingest; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -160,7 +160,7 @@ public SimulateProcessorResult(String type, String processorTag, String descript * Read from a stream. */ SimulateProcessorResult(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { this.processorTag = in.readOptionalString(); } else { this.processorTag = in.readString(); @@ -179,7 +179,7 @@ public SimulateProcessorResult(String type, String processorTag, String descript @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { out.writeOptionalString(processorTag); } else { out.writeString(processorTag); diff --git a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java index cd05c5565ccb7..6a5f83e92229f 100644 --- a/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/AbstractSearchAsyncAction.java @@ -177,7 +177,8 @@ protected void notifyListShards( SearchProgressListener.buildSearchShards(this.shardsIts), SearchProgressListener.buildSearchShards(toSkipShardsIts), clusters, - sourceBuilder == null || sourceBuilder.size() > 0 + sourceBuilder == null || sourceBuilder.size() > 0, + timeProvider ); } diff --git a/server/src/main/java/org/elasticsearch/action/search/CCSSingleCoordinatorSearchProgressListener.java b/server/src/main/java/org/elasticsearch/action/search/CCSSingleCoordinatorSearchProgressListener.java new file mode 100644 index 0000000000000..00567bcd712da --- /dev/null +++ b/server/src/main/java/org/elasticsearch/action/search/CCSSingleCoordinatorSearchProgressListener.java @@ -0,0 +1,317 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.action.search; + +import org.apache.lucene.search.TotalHits; +import org.elasticsearch.common.util.CollectionUtils; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.search.SearchShardTarget; +import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.query.QuerySearchResult; +import org.elasticsearch.transport.RemoteClusterAware; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicReference; + +/** + * Use this progress listener for cross-cluster searches where a single + * coordinator is used for all clusters (minimize_roundtrips=false). 
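Every mutation in this listener follows the same lock-free idiom: read the current immutable Cluster snapshot out of its AtomicReference, derive an updated copy, and compareAndSet it back, retrying if a concurrent shard callback won the race. A minimal self-contained sketch of that idiom, with a hypothetical Snapshot record standing in for SearchResponse.Cluster:

```java
import java.util.concurrent.atomic.AtomicReference;

public class CasRetrySketch {
    // immutable stand-in for SearchResponse.Cluster
    record Snapshot(int successfulShards, boolean timedOut) {}

    static void markTimedOut(AtomicReference<Snapshot> ref) {
        boolean swapped;
        do {
            Snapshot curr = ref.get();
            if (curr.timedOut()) {
                break; // another thread already recorded the timeout
            }
            Snapshot updated = new Snapshot(curr.successfulShards(), true);
            // succeeds only if no concurrent writer replaced curr in the meantime
            swapped = ref.compareAndSet(curr, updated);
        } while (swapped == false);
    }

    public static void main(String[] args) {
        AtomicReference<Snapshot> ref = new AtomicReference<>(new Snapshot(3, false));
        markTimedOut(ref);
        System.out.println(ref.get()); // Snapshot[successfulShards=3, timedOut=true]
    }
}
```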
+ * It updates state in the SearchResponse.Clusters object as the search + * progresses so that the metadata required for the _clusters/details + * section in the SearchResponse is accurate. + */ +public class CCSSingleCoordinatorSearchProgressListener extends SearchProgressListener { + + private SearchResponse.Clusters clusters; + private TransportSearchAction.SearchTimeProvider timeProvider; + + /** + * Executed when shards are ready to be queried (after can-match) + * + * @param shards The list of shards to query. + * @param skipped The list of skipped shards. + * @param clusters The statistics for remote clusters included in the search. + * @param fetchPhase true if the search needs a fetch phase, false otherwise. + **/ + @Override + public void onListShards( + List shards, + List skipped, + SearchResponse.Clusters clusters, + boolean fetchPhase, + TransportSearchAction.SearchTimeProvider timeProvider + ) { + assert clusters.isCcsMinimizeRoundtrips() == false : "minimize_roundtrips must be false to use this SearchListener"; + + this.clusters = clusters; + this.timeProvider = timeProvider; + + // Partition by clusterAlias and get counts + Map skippedByClusterAlias = partitionCountsByClusterAlias(skipped); + // the 'shards' list does not include the shards in the 'skipped' list, so combine counts from both to get total + Map totalByClusterAlias = partitionCountsByClusterAlias(shards); + skippedByClusterAlias.forEach((cluster, count) -> totalByClusterAlias.merge(cluster, count, Integer::sum)); + + for (Map.Entry entry : totalByClusterAlias.entrySet()) { + String clusterAlias = entry.getKey(); + AtomicReference clusterRef = clusters.getCluster(clusterAlias); + assert clusterRef.get().getTotalShards() == null : "total shards should not be set on a Cluster before onListShards"; + + int totalCount = entry.getValue(); + int skippedCount = skippedByClusterAlias.getOrDefault(clusterAlias, 0); + TimeValue took = null; + + boolean swapped; + do { + SearchResponse.Cluster curr = clusterRef.get(); + SearchResponse.Cluster.Status status = curr.getStatus(); + assert status == SearchResponse.Cluster.Status.RUNNING : "should have RUNNING status during onListShards but has " + status; + + // if all shards are marked as skipped, the search is done - mark as SUCCESSFUL + if (skippedCount == totalCount) { + took = new TimeValue(timeProvider.buildTookInMillis()); + status = SearchResponse.Cluster.Status.SUCCESSFUL; + } + + SearchResponse.Cluster updated = new SearchResponse.Cluster.Builder(curr).setStatus(status) + .setTotalShards(totalCount) + .setSuccessfulShards(skippedCount) + .setSkippedShards(skippedCount) + .setFailedShards(0) + .setTook(took) + .setTimedOut(false) + .build(); + + swapped = clusterRef.compareAndSet(curr, updated); + assert swapped : "compareAndSet in onListShards should never fail due to race condition"; + } while (swapped == false); + } + } + + /** + * Executed when a shard returns a query result. + * + * @param shardIndex The index of the shard in the list provided by {@link SearchProgressListener#onListShards}. 
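The shard bookkeeping in onListShards buckets shard lists by cluster alias with Map.merge, the same trick partitionCountsByClusterAlias uses at the bottom of this file. A runnable sketch of that counting step; the SearchShard record and the "(local)" key are stand-ins (the real code maps a null alias to RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY):

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

public class PartitionCountSketch {
    record SearchShard(String clusterAlias) {} // null alias means the local cluster

    static Map<String, Integer> countByAlias(List<SearchShard> shards) {
        Map<String, Integer> counts = new HashMap<>();
        for (SearchShard shard : shards) {
            counts.merge(Objects.requireNonNullElse(shard.clusterAlias(), "(local)"), 1, Integer::sum);
        }
        return counts;
    }

    public static void main(String[] args) {
        Map<String, Integer> total = countByAlias(List.of(new SearchShard("remote1"), new SearchShard(null)));
        Map<String, Integer> skipped = countByAlias(List.of(new SearchShard("remote1")));
        // skipped shards are excluded from the 'shards' list, so totals are the sum of both partitions
        skipped.forEach((alias, n) -> total.merge(alias, n, Integer::sum));
        System.out.println(total); // e.g. {(local)=1, remote1=2}
    }
}
```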
+ * @param queryResult QuerySearchResult holding the result for a SearchShardTarget + */ + @Override + public void onQueryResult(int shardIndex, QuerySearchResult queryResult) { + // we only need to update Cluster state here if the search has timed out, since: + // 1) this is the only callback that gets search timedOut info and + // 2) the onFinalReduce will get all these shards again so the final accounting can be done there + // for queries that did not time out + if (queryResult.searchTimedOut() && clusters.hasClusterObjects()) { + SearchShardTarget shardTarget = queryResult.getSearchShardTarget(); + String clusterAlias = shardTarget.getClusterAlias(); + if (clusterAlias == null) { + clusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + } + AtomicReference clusterRef = clusters.getCluster(clusterAlias); + boolean swapped; + do { + SearchResponse.Cluster curr = clusterRef.get(); + if (curr.isTimedOut()) { + break; // cluster has already been marked as timed out on some other shard + } + if (curr.getStatus() == SearchResponse.Cluster.Status.FAILED || curr.getStatus() == SearchResponse.Cluster.Status.SKIPPED) { + break; // safety check to make sure it hasn't hit a terminal FAILED/SKIPPED state where timeouts don't matter + } + SearchResponse.Cluster updated = new SearchResponse.Cluster.Builder(curr).setTimedOut(true).build(); + swapped = clusterRef.compareAndSet(curr, updated); + } while (swapped == false); + } + } + + /** + * Executed when a shard reports a query failure. + * + * @param shardIndex The index of the shard in the list provided by {@link SearchProgressListener#onListShards}. + * @param shardTarget The last shard target that threw an exception. + * @param e The cause of the failure. + */ + @Override + public void onQueryFailure(int shardIndex, SearchShardTarget shardTarget, Exception e) { + if (clusters.hasClusterObjects() == false) { + return; + } + String clusterAlias = shardTarget.getClusterAlias(); + if (clusterAlias == null) { + clusterAlias = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; + } + AtomicReference clusterRef = clusters.getCluster(clusterAlias); + boolean swapped; + do { + TimeValue took = null; + SearchResponse.Cluster curr = clusterRef.get(); + SearchResponse.Cluster.Status status = SearchResponse.Cluster.Status.RUNNING; + int numFailedShards = curr.getFailedShards() == null ? 1 : curr.getFailedShards() + 1; + + assert curr.getTotalShards() != null : "total shards should be set on the Cluster but not for " + clusterAlias; + if (curr.getTotalShards() == numFailedShards) { + if (curr.isSkipUnavailable()) { + status = SearchResponse.Cluster.Status.SKIPPED; + } else { + status = SearchResponse.Cluster.Status.FAILED; + // TODO in the fail-fast ticket, should we throw an exception here to stop the search? + } + } else if (curr.getTotalShards() == numFailedShards + curr.getSuccessfulShards()) { + status = SearchResponse.Cluster.Status.PARTIAL; + took = new TimeValue(timeProvider.buildTookInMillis()); + } + + // creates a new unmodifiable list + List failures = CollectionUtils.appendToCopy(curr.getFailures(), new ShardSearchFailure(e, shardTarget)); + SearchResponse.Cluster updated = new SearchResponse.Cluster.Builder(curr).setStatus(status) + .setFailedShards(numFailedShards) + .setFailures(failures) + .setTook(took) + .build(); + + swapped = clusterRef.compareAndSet(curr, updated); + } while (swapped == false); + } +
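The partial-reduce callback below fires once per batched reduce, so its frequency is governed by the request's batched reduce size. A hypothetical caller-side snippet (index pattern and batch size invented):

```java
SearchRequest request = new SearchRequest("logs-*");
request.setBatchedReduceSize(5); // partial-reduce after every 5 shard results instead of the default 512
```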
+ /** + * Executed when a partial reduce is created. The number of partial reduces can be controlled via + * {@link SearchRequest#setBatchedReduceSize(int)}. + * + * Note that onPartialReduce and onFinalReduce are called with cumulative data so far. + * For example, if the first call to onPartialReduce has 5 shards, the second call will + * have those same 5 shards plus the new batch. onFinalReduce will see all those + * shards one final time. + * + * @param shards The list of shards that are part of this reduce. + * @param totalHits The total number of hits in this reduce. + * @param aggs The partial result for aggregations. + * @param reducePhase The version number for this reduce. + */ + @Override + public void onPartialReduce(List shards, TotalHits totalHits, InternalAggregations aggs, int reducePhase) { + Map totalByClusterAlias = partitionCountsByClusterAlias(shards); + + for (Map.Entry entry : totalByClusterAlias.entrySet()) { + String clusterAlias = entry.getKey(); + int successfulCount = entry.getValue().intValue(); + + AtomicReference clusterRef = clusters.getCluster(clusterAlias); + boolean swapped; + do { + SearchResponse.Cluster curr = clusterRef.get(); + SearchResponse.Cluster.Status status = curr.getStatus(); + if (status != SearchResponse.Cluster.Status.RUNNING) { + // don't swap in a new Cluster if the final state has already been set + break; + } + TimeValue took = null; + int successfulShards = successfulCount + curr.getSkippedShards(); + if (successfulShards == curr.getTotalShards()) { + status = curr.isTimedOut() ? SearchResponse.Cluster.Status.PARTIAL : SearchResponse.Cluster.Status.SUCCESSFUL; + took = new TimeValue(timeProvider.buildTookInMillis()); + } else if (successfulShards + curr.getFailedShards() == curr.getTotalShards()) { + status = SearchResponse.Cluster.Status.PARTIAL; + took = new TimeValue(timeProvider.buildTookInMillis()); + } + + SearchResponse.Cluster updated = new SearchResponse.Cluster.Builder(curr).setStatus(status) + .setSuccessfulShards(successfulShards) + .setTook(took) + .build(); + + swapped = clusterRef.compareAndSet(curr, updated); + } while (swapped == false); + } + } +
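Both reduce callbacks share the same arithmetic: skipped shards count toward the successful tally, and a cluster leaves RUNNING only once every shard is accounted for. A worked check with invented counts:

```java
public class ReduceAccountingSketch {
    public static void main(String[] args) {
        int totalShards = 8, skippedShards = 2, failedShards = 1;
        int successfulCount = 5; // query results seen for this cluster in the reduce

        int successfulShards = successfulCount + skippedShards; // 7: skipped shards count as successful
        assert successfulShards + failedShards == totalShards;  // the invariant onFinalReduce asserts

        boolean timedOut = false;
        String status = (timedOut || successfulShards < totalShards) ? "PARTIAL" : "SUCCESSFUL";
        System.out.println(status); // PARTIAL, because one shard failed
    }
}
```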
+ /** + * Executed once when the final reduce is created. + * + * Note that this will see all the shards, even if they have been passed to the onPartialReduce + * method already. + * + * @param shards The list of shards that are part of this reduce. + * @param totalHits The total number of hits in this reduce. + * @param aggs The final result for aggregations. + * @param reducePhase The version number for this reduce. + */ + @Override + public void onFinalReduce(List shards, TotalHits totalHits, InternalAggregations aggs, int reducePhase) { + if (clusters.hasClusterObjects() == false) { + return; + } + + Map totalByClusterAlias = partitionCountsByClusterAlias(shards); + + for (Map.Entry entry : totalByClusterAlias.entrySet()) { + String clusterAlias = entry.getKey(); + int successfulCount = entry.getValue().intValue(); + + AtomicReference clusterRef = clusters.getCluster(clusterAlias); + boolean swapped; + do { + SearchResponse.Cluster curr = clusterRef.get(); + SearchResponse.Cluster.Status status = curr.getStatus(); + if (status != SearchResponse.Cluster.Status.RUNNING) { + // don't swap in a new Cluster if the final state has already been set + break; + } + TimeValue took = new TimeValue(timeProvider.buildTookInMillis()); + int successfulShards = successfulCount + curr.getSkippedShards(); + assert successfulShards + curr.getFailedShards() == curr.getTotalShards() + : "successfulShards(" + + successfulShards + + ") + failedShards(" + + curr.getFailedShards() + + ") != totalShards (" + + curr.getTotalShards() + + ')'; + if (curr.isTimedOut() || successfulShards < curr.getTotalShards()) { + status = SearchResponse.Cluster.Status.PARTIAL; + } else { + assert successfulShards == curr.getTotalShards() + : "successful (" + successfulShards + ") should equal total(" + curr.getTotalShards() + ") if we get here"; + status = SearchResponse.Cluster.Status.SUCCESSFUL; + } + SearchResponse.Cluster updated = new SearchResponse.Cluster.Builder(curr).setStatus(status) + .setSuccessfulShards(successfulShards) + .setTook(took) + .build(); + swapped = clusterRef.compareAndSet(curr, updated); + } while (swapped == false); + } + } + + /** + * Executed when a shard returns a fetch result. + * + * @param shardIndex The index of the shard in the list provided by {@link SearchProgressListener#onListShards}. + */ + @Override + public void onFetchResult(int shardIndex) {} + + /** + * Executed when a shard reports a fetch failure. + * + * @param shardIndex The index of the shard in the list provided by {@link SearchProgressListener#onListShards}. + * @param shardTarget The last shard target that threw an exception. + * @param exc The cause of the failure.
+ */ + @Override + public void onFetchFailure(int shardIndex, SearchShardTarget shardTarget, Exception exc) {} + + private Map partitionCountsByClusterAlias(List shards) { + final Map res = new HashMap<>(); + for (SearchShard shard : shards) { + res.merge(Objects.requireNonNullElse(shard.clusterAlias(), RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY), 1, Integer::sum); + } + return res; + } +} diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java index 8c5d828fff01f..865b7bdf8abfa 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.search; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.support.IndicesOptions; @@ -151,7 +151,7 @@ public CanMatchNodeRequest(StreamInput in) throws IOException { source = in.readOptionalWriteable(SearchSourceBuilder::new); indicesOptions = IndicesOptions.readIndicesOptions(in); searchType = SearchType.fromId(in.readByte()); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { // types no longer relevant so ignore String[] types = in.readStringArray(); if (types.length > 0) { @@ -167,7 +167,7 @@ public CanMatchNodeRequest(StreamInput in) throws IOException { nowInMillis = in.readVLong(); clusterAlias = in.readOptionalString(); waitForCheckpointsTimeout = in.readTimeValue(); - shards = in.readList(Shard::new); + shards = in.readCollectionAsList(Shard::new); indices = shards.stream().map(Shard::getOriginalIndices).flatMap(Arrays::stream).distinct().toArray(String[]::new); } @@ -177,7 +177,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(source); indicesOptions.writeIndicesOptions(out); out.writeByte(searchType.id()); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { // types not supported so send an empty array to previous versions out.writeStringArray(Strings.EMPTY_ARRAY); } @@ -188,7 +188,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(nowInMillis); out.writeOptionalString(clusterAlias); out.writeTimeValue(waitForCheckpointsTimeout); - out.writeList(shards); + out.writeCollection(shards); } public List getShardLevelRequests() { diff --git a/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeResponse.java b/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeResponse.java index 05aaaa56583ed..9909ad60a949e 100644 --- a/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/CanMatchNodeResponse.java @@ -24,7 +24,7 @@ public class CanMatchNodeResponse extends TransportResponse { public CanMatchNodeResponse(StreamInput in) throws IOException { super(in); - responses = in.readList(ResponseOrFailure::new); + responses = in.readCollectionAsList(ResponseOrFailure::new); } public CanMatchNodeResponse(List responses) { @@ -33,7 +33,7 @@ public CanMatchNodeResponse(List responses) { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(responses); + 
out.writeCollection(responses); } public List getResponses() { diff --git a/server/src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java b/server/src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java index a72e54d472b66..6309c0264a558 100644 --- a/server/src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/ClearScrollRequest.java @@ -72,7 +72,7 @@ public void writeTo(StreamOutput out) throws IOException { if (scrollIds == null) { out.writeVInt(0); } else { - out.writeStringArray(scrollIds.toArray(new String[scrollIds.size()])); + out.writeStringCollection(scrollIds); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java index a8cb8002a80d8..633e56b97a833 100644 --- a/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java +++ b/server/src/main/java/org/elasticsearch/action/search/OpenPointInTimeRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.search; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -51,7 +51,7 @@ public OpenPointInTimeRequest(StreamInput in) throws IOException { this.keepAlive = in.readTimeValue(); this.routing = in.readOptionalString(); this.preference = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_017)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { this.maxConcurrentShardRequests = in.readVInt(); } } @@ -64,7 +64,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeTimeValue(keepAlive); out.writeOptionalString(routing); out.writeOptionalString(preference); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_017)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeVInt(maxConcurrentShardRequests); } } diff --git a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java index b9d6ffeda2931..f78d5f4005755 100644 --- a/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java +++ b/server/src/main/java/org/elasticsearch/action/search/QueryPhaseResultConsumer.java @@ -113,7 +113,7 @@ public void close() { public void consumeResult(SearchPhaseResult result, Runnable next) { super.consumeResult(result, () -> {}); QuerySearchResult querySearchResult = result.queryResult(); - progressListener.notifyQueryResult(querySearchResult.getShardIndex()); + progressListener.notifyQueryResult(querySearchResult.getShardIndex(), querySearchResult); pendingMerges.consume(querySearchResult, next); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java index 4fb90cb871d21..2b7105cffe2bb 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchContextId.java @@ -8,15 +8,18 @@ package org.elasticsearch.action.search; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.TransportVersion; import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.ByteBufferStreamInput; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; @@ -24,11 +27,11 @@ import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.transport.RemoteClusterAware; +import java.io.ByteArrayInputStream; import java.io.IOException; -import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; import java.util.Base64; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -63,46 +66,54 @@ public static String encode( Map aliasFilter, TransportVersion version ) { - final Map shards = new HashMap<>(); - for (SearchPhaseResult searchPhaseResult : searchPhaseResults) { - final SearchShardTarget target = searchPhaseResult.getSearchShardTarget(); - shards.put( - target.getShardId(), - new SearchContextIdForNode(target.getClusterAlias(), target.getNodeId(), searchPhaseResult.getContextId()) - ); - } - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setTransportVersion(version); - TransportVersion.writeVersion(version, out); - out.writeMap(shards, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o)); - out.writeMap(aliasFilter, StreamOutput::writeString, (o, v) -> v.writeTo(o)); - return Base64.getUrlEncoder().encodeToString(BytesReference.toBytes(out.bytes())); + final BytesReference bytesReference; + try (var encodedStreamOutput = new BytesStreamOutput()) { + try (var out = new OutputStreamStreamOutput(Base64.getUrlEncoder().wrap(encodedStreamOutput))) { + out.setTransportVersion(version); + TransportVersion.writeVersion(version, out); + out.writeCollection(searchPhaseResults, SearchContextId::writeSearchPhaseResult); + out.writeMap(aliasFilter, StreamOutput::writeWriteable); + } + bytesReference = encodedStreamOutput.bytes(); } catch (IOException e) { + assert false : e; throw new IllegalArgumentException(e); } + final BytesRef bytesRef = bytesReference.toBytesRef(); + return new String(bytesRef.bytes, bytesRef.offset, bytesRef.length, StandardCharsets.ISO_8859_1); + } + + private static void writeSearchPhaseResult(StreamOutput out, SearchPhaseResult searchPhaseResult) throws IOException { + final SearchShardTarget target = searchPhaseResult.getSearchShardTarget(); + target.getShardId().writeTo(out); + new SearchContextIdForNode(target.getClusterAlias(), target.getNodeId(), searchPhaseResult.getContextId()).writeTo(out); } public static SearchContextId decode(NamedWriteableRegistry namedWriteableRegistry, String id) { - final ByteBuffer byteBuffer; - try { - byteBuffer = ByteBuffer.wrap(Base64.getUrlDecoder().decode(id)); - } catch (Exception e) { - throw new IllegalArgumentException("invalid id: [" + id + "]", e); - } - try (StreamInput in = new NamedWriteableAwareStreamInput(new ByteBufferStreamInput(byteBuffer), namedWriteableRegistry)) { + try ( + var decodedInputStream = 
Base64.getUrlDecoder().wrap(new ByteArrayInputStream(id.getBytes(StandardCharsets.ISO_8859_1))); + var in = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(decodedInputStream), namedWriteableRegistry) + ) { final TransportVersion version = TransportVersion.readVersion(in); in.setTransportVersion(version); - final Map shards = in.readMap(ShardId::new, SearchContextIdForNode::new); - final Map aliasFilters = in.readMap(AliasFilter::readFrom); + final Map shards = Collections.unmodifiableMap( + in.readCollection(Maps::newHashMapWithExpectedSize, SearchContextId::readShardsMapEntry) + ); + final Map aliasFilters = in.readImmutableMap(AliasFilter::readFrom); if (in.available() > 0) { throw new IllegalArgumentException("Not all bytes were read"); } - return new SearchContextId(Collections.unmodifiableMap(shards), Collections.unmodifiableMap(aliasFilters)); + return new SearchContextId(shards, aliasFilters); } catch (IOException e) { + assert false : e; throw new IllegalArgumentException(e); } } + private static void readShardsMapEntry(StreamInput in, Map shards) throws IOException { + shards.put(new ShardId(in), new SearchContextIdForNode(in)); + } + public String[] getActualIndices() { final Set indices = new HashSet<>(); for (Map.Entry entry : shards().entrySet()) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java index 662b72e114a40..c7ad250892160 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchDfsQueryThenFetchAsyncAction.java @@ -27,6 +27,7 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction { private final QueryPhaseResultConsumer queryPhaseResultConsumer; + private final SearchProgressListener progressListener; SearchDfsQueryThenFetchAsyncAction( final Logger logger, @@ -63,7 +64,8 @@ final class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction clusters ); this.queryPhaseResultConsumer = queryPhaseResultConsumer; - SearchProgressListener progressListener = task.getProgressListener(); + this.progressListener = task.getProgressListener(); + // don't build the SearchShard list (can be expensive) if the SearchProgressListener won't use it if (progressListener != SearchProgressListener.NOOP) { notifyListShards(progressListener, clusters, request.source()); } @@ -98,4 +100,9 @@ protected SearchPhase getNextPhase(final SearchPhaseResults res context ); } + + @Override + protected void onShardGroupFailure(int shardIndex, SearchShardTarget shardTarget, Exception exc) { + progressListener.notifyQueryFailure(shardIndex, shardTarget, exc); + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java b/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java index 8f81b3995858e..c6b0022593179 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchProgressListener.java @@ -16,6 +16,7 @@ import org.elasticsearch.search.SearchPhaseResult; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.query.QuerySearchResult; import java.util.List; import java.util.Objects; @@ -39,15 +40,23 @@ public abstract class 
SearchProgressListener { * @param skippedShards The list of skipped shards. * @param clusters The statistics for remote clusters included in the search. * @param fetchPhase true if the search needs a fetch phase, false otherwise. + * @param timeProvider absolute and relative time provider for this search **/ - protected void onListShards(List shards, List skippedShards, Clusters clusters, boolean fetchPhase) {} + protected void onListShards( + List shards, + List skippedShards, + Clusters clusters, + boolean fetchPhase, + TransportSearchAction.SearchTimeProvider timeProvider + ) {} /** * Executed when a shard returns a query result. * - * @param shardIndex The index of the shard in the list provided by {@link SearchProgressListener#onListShards} )}. + * @param shardIndex The index of the shard in the list provided by {@link SearchProgressListener#onListShards} )}. + * @param queryResult */ - protected void onQueryResult(int shardIndex) {} + protected void onQueryResult(int shardIndex, QuerySearchResult queryResult) {} /** * Executed when a shard reports a query failure. @@ -95,18 +104,24 @@ protected void onFetchResult(int shardIndex) {} */ protected void onFetchFailure(int shardIndex, SearchShardTarget shardTarget, Exception exc) {} - final void notifyListShards(List shards, List skippedShards, Clusters clusters, boolean fetchPhase) { + final void notifyListShards( + List shards, + List skippedShards, + Clusters clusters, + boolean fetchPhase, + TransportSearchAction.SearchTimeProvider timeProvider + ) { this.shards = shards; try { - onListShards(shards, skippedShards, clusters, fetchPhase); + onListShards(shards, skippedShards, clusters, fetchPhase, timeProvider); } catch (Exception e) { logger.warn("Failed to execute progress listener on list shards", e); } } - final void notifyQueryResult(int shardIndex) { + final void notifyQueryResult(int shardIndex, QuerySearchResult queryResult) { try { - onQueryResult(shardIndex); + onQueryResult(shardIndex, queryResult); } catch (Exception e) { logger.warn(() -> "[" + shards.get(shardIndex) + "] Failed to execute progress listener on query result", e); } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java index ef24833d39df0..2dfd46182266c 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchQueryThenFetchAsyncAction.java @@ -77,7 +77,7 @@ class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction 0) { @@ -263,16 +264,16 @@ public SearchRequest(StreamInput in) throws IOException { finalReduce = true; } ccsMinimizeRoundtrips = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_12_0) && in.readBoolean()) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_12_0) && in.readBoolean()) { minCompatibleShardNode = Version.readVersion(in); } else { minCompatibleShardNode = null; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { waitForCheckpoints = in.readMap(StreamInput::readLongArray); waitForCheckpointsTimeout = in.readTimeValue(); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { forceSyntheticSource = in.readBoolean(); } else { forceSyntheticSource = false; 
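The serialization hunks above and below all apply one idiom: gate newer fields on the negotiated wire version, default them on read, and refuse to silently drop them on write. A generic analogue outside the Elasticsearch stream classes (version constant, field name, and message are illustrative):

```java
import java.io.DataOutputStream;
import java.io.IOException;

final class VersionGateSketch {
    static final int V_8_4_0 = 8_040_099; // arbitrary stand-in for a wire-version id

    static void write(DataOutputStream out, int negotiatedVersion, boolean forceSyntheticSource) throws IOException {
        if (negotiatedVersion >= V_8_4_0) {
            out.writeBoolean(forceSyntheticSource);
        } else if (forceSyntheticSource) {
            // an older peer has no slot for this flag, so fail loudly rather than drop it
            throw new IllegalArgumentException("force_synthetic_source requires 8.4.0 or later");
        }
    }
}
```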
@@ -288,7 +289,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(preference); out.writeOptionalWriteable(scroll); out.writeOptionalWriteable(source); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { // types not supported so send an empty array to previous versions out.writeStringArray(Strings.EMPTY_ARRAY); } @@ -304,15 +305,15 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(finalReduce); } out.writeBoolean(ccsMinimizeRoundtrips); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_12_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_12_0)) { out.writeBoolean(minCompatibleShardNode != null); if (minCompatibleShardNode != null) { Version.writeVersion(minCompatibleShardNode, out); } } - TransportVersion waitForCheckpointsVersion = TransportVersion.V_7_16_0; + TransportVersion waitForCheckpointsVersion = TransportVersions.V_7_16_0; if (out.getTransportVersion().onOrAfter(waitForCheckpointsVersion)) { - out.writeMap(waitForCheckpoints, StreamOutput::writeString, StreamOutput::writeLongArray); + out.writeMap(waitForCheckpoints, StreamOutput::writeLongArray); out.writeTimeValue(waitForCheckpointsTimeout); } else if (waitForCheckpoints.isEmpty() == false) { throw new IllegalArgumentException( @@ -324,7 +325,7 @@ public void writeTo(StreamOutput out) throws IOException { + "] or greater." ); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { out.writeBoolean(forceSyntheticSource); } else { if (forceSyntheticSource) { diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java index 02f0e4f9db129..fd995d284ea69 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchResponse.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.search; import org.apache.lucene.search.TotalHits; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.common.Strings; @@ -95,7 +95,7 @@ public SearchResponse(StreamInput in) throws IOException { scrollId = in.readOptionalString(); tookInMillis = in.readVLong(); skippedShards = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { pointInTimeId = in.readOptionalString(); } else { pointInTimeId = null; @@ -440,7 +440,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(scrollId); out.writeVLong(tookInMillis); out.writeVInt(skippedShards); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { out.writeOptionalString(pointInTimeId); } } @@ -474,8 +474,8 @@ public static class Clusters implements ToXContentFragment, Writeable { // updates to the Cluster occur by CAS swapping in new Cluster objects into the AtomicReference in the map. 
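The comment closing the hunk above states the concurrency contract for the clusterInfo map: the map itself never changes, and progress is recorded by building a new immutable Cluster from the current one and compare-and-swapping it into the per-cluster AtomicReference, retrying if another thread won the race. A self-contained sketch of that retry loop, with an invented State record standing in for Cluster:

```java
import java.util.concurrent.atomic.AtomicReference;

final class CasSwapExample {
    record State(int successful, int skipped) {}

    // Mirrors the do/while compareAndSet pattern used for Cluster updates in this diff.
    static void markSkipped(AtomicReference<State> ref) {
        boolean swapped;
        do {
            State orig = ref.get();
            State updated = new State(orig.successful(), orig.skipped() + 1);
            swapped = ref.compareAndSet(orig, updated); // false if a concurrent writer got there first
        } while (swapped == false);
    }

    public static void main(String[] args) {
        AtomicReference<State> ref = new AtomicReference<>(new State(3, 0));
        markSkipped(ref);
        System.out.println(ref.get()); // State[successful=3, skipped=1]
    }
}
```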
private final Map<String, AtomicReference<Cluster>> clusterInfo; - // this field is not Writeable, as it is only needed on the initial "querying cluster" coordinator of a CCS search - private final transient boolean ccsMinimizeRoundtrips; + // not Writeable since it is only needed on the (primary) CCS coordinator + private transient Boolean ccsMinimizeRoundtrips; /** * For use with cross-cluster searches. * @param localIndices The localIndices to be searched - null if no local indices are to be searched * @param remoteClusterIndices mapping of clusterAlias -> OriginalIndices for each remote cluster * @param ccsMinimizeRoundtrips whether minimizing roundtrips for the CCS + * @param skipUnavailablePredicate given a cluster alias, returns true if that cluster is skip_unavailable=true + * and false otherwise */ public Clusters( @Nullable OriginalIndices localIndices, Map<String, OriginalIndices> remoteClusterIndices, - boolean ccsMinimizeRoundtrips + boolean ccsMinimizeRoundtrips, + Predicate<String> skipUnavailablePredicate ) { + assert remoteClusterIndices.size() > 0 : "At least one remote cluster must be passed into this Cluster constructor"; this.total = remoteClusterIndices.size() + (localIndices == null ? 0 : 1); - assert total >= 1 : "No local indices or remote clusters passed in"; this.successful = 0; // calculated from clusterInfo map for minimize_roundtrips this.skipped = 0; // calculated from clusterInfo map for minimize_roundtrips this.ccsMinimizeRoundtrips = ccsMinimizeRoundtrips; Map<String, AtomicReference<Cluster>> m = new HashMap<>(); if (localIndices != null) { String localKey = RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY; - Cluster c = new Cluster(localKey, String.join(",", localIndices.indices())); + Cluster c = new Cluster(localKey, String.join(",", localIndices.indices()), false); m.put(localKey, new AtomicReference<>(c)); } for (Map.Entry<String, OriginalIndices> remote : remoteClusterIndices.entrySet()) { String clusterAlias = remote.getKey(); - Cluster c = new Cluster(clusterAlias, String.join(",", remote.getValue().indices())); + boolean skipUnavailable = skipUnavailablePredicate.test(clusterAlias); + Cluster c = new Cluster(clusterAlias, String.join(",", remote.getValue().indices()), skipUnavailable); m.put(clusterAlias, new AtomicReference<>(c)); } this.clusterInfo = Collections.unmodifiableMap(m); } /** - * Used for searches that are either not cross-cluster or CCS with minimize_roundtrips=false. + * Used for searches that are not cross-cluster.
* For CCS minimize_roundtrips=true use {@code Clusters(OriginalIndices, Map<String, OriginalIndices>, boolean)} * @param total total number of clusters in the search * @param successful number of successful clusters in the search * @param skipped number of skipped clusters (skipped can only happen for remote clusters with skip_unavailable=true) */ public Clusters(int total, int successful, int skipped) { + // TODO: change assert to total == 1 or total == 0 - this should probably only be used for local searches now assert total >= 0 && successful >= 0 && skipped >= 0 && successful <= total : "total: " + total + " successful: " + successful + " skipped: " + skipped; assert skipped == total - successful : "total: " + total + " successful: " + successful + " skipped: " + skipped; @@ -532,8 +537,8 @@ public Clusters(StreamInput in) throws IOException { this.total = in.readVInt(); this.successful = in.readVInt(); this.skipped = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_053)) { - List<Cluster> clusterList = in.readList(Cluster::new); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_053)) { + List<Cluster> clusterList = in.readCollectionAsList(Cluster::new); if (clusterList.isEmpty()) { this.clusterInfo = Collections.emptyMap(); } else { @@ -544,7 +549,6 @@ public Clusters(StreamInput in) throws IOException { } else { this.clusterInfo = Collections.emptyMap(); } - this.ccsMinimizeRoundtrips = false; assert total >= 0 : "total is negative: " + total; assert total >= successful + skipped : "successful + skipped is larger than total. total: " + total + " successful: " + successful + " skipped: " + skipped; @@ -565,12 +569,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(total); out.writeVInt(successful); out.writeVInt(skipped); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_053)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_053)) { if (clusterInfo != null) { List<Cluster> clusterList = clusterInfo.values().stream().map(AtomicReference::get).toList(); - out.writeList(clusterList); + out.writeCollection(clusterList); } else { - out.writeList(Collections.emptyList()); + out.writeCollection(Collections.emptyList()); } } } @@ -690,7 +694,7 @@ public int getSkipped() { /** * @return whether this search was a cross cluster search done with ccsMinimizeRoundtrips=true */ - public boolean isCcsMinimizeRoundtrips() { + public Boolean isCcsMinimizeRoundtrips() { return ccsMinimizeRoundtrips; } @@ -726,14 +730,19 @@ public String toString() { /** * @return true if any underlying Cluster objects have PARTIAL, SKIPPED, FAILED or RUNNING status. + * or if any Cluster is marked as timedOut. */ public boolean hasPartialResults() { - for (AtomicReference<Cluster> cluster : clusterInfo.values()) { - switch (cluster.get().getStatus()) { + for (AtomicReference<Cluster> clusterRef : clusterInfo.values()) { + Cluster cluster = clusterRef.get(); + switch (cluster.getStatus()) { case PARTIAL, SKIPPED, FAILED, RUNNING -> { return true; } } + if (cluster.isTimedOut()) { + return true; + } } return false; } @@ -746,6 +755,13 @@ public boolean hasClusterObjects() { return clusterInfo.keySet().size() > 0; } + /** + * @return true if this Clusters object has been initialized with remote Cluster objects. + * This will be false for local-cluster (non-CCS) only searches.
+ */ + public boolean hasRemoteClusters() { + return total > 1 || clusterInfo.keySet().stream().anyMatch(alias -> alias != RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY); + } } /** @@ -761,8 +777,11 @@ public static class Cluster implements ToXContentFragment, Writeable { static final ParseField INDICES_FIELD = new ParseField("indices"); static final ParseField STATUS_FIELD = new ParseField("status"); + private static final boolean SKIP_UNAVAILABLE_DEFAULT = false; + private final String clusterAlias; private final String indexExpression; // original index expression from the user for this cluster + private final boolean skipUnavailable; private final Status status; private final Integer totalShards; private final Integer successfulShards; @@ -794,9 +813,10 @@ public String toString() { * @param clusterAlias clusterAlias as defined in the remote cluster settings or RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY * for the local cluster * @param indexExpression the original (not resolved/concrete) indices expression provided for this cluster. + * @param skipUnavailable whether this Cluster is marked as skip_unavailable in remote cluster settings */ - public Cluster(String clusterAlias, String indexExpression) { - this(clusterAlias, indexExpression, Status.RUNNING, null, null, null, null, null, null, false); + public Cluster(String clusterAlias, String indexExpression, boolean skipUnavailable) { + this(clusterAlias, indexExpression, skipUnavailable, Status.RUNNING, null, null, null, null, null, null, false); } /** @@ -806,16 +826,24 @@ public Cluster(String clusterAlias, String indexExpression) { * @param clusterAlias clusterAlias as defined in the remote cluster settings or RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY * for the local cluster * @param indexExpression the original (not resolved/concrete) indices expression provided for this cluster. 
+ * @param skipUnavailable whether cluster is marked as skip_unavailable in remote cluster settings * @param status current status of the search on this Cluster * @param failures list of failures that occurred during the search on this Cluster */ - public Cluster(String clusterAlias, String indexExpression, Status status, List failures) { - this(clusterAlias, indexExpression, status, null, null, null, null, failures, null, false); + public Cluster( + String clusterAlias, + String indexExpression, + boolean skipUnavailable, + Status status, + List failures + ) { + this(clusterAlias, indexExpression, skipUnavailable, status, null, null, null, null, failures, null, false); } public Cluster( String clusterAlias, String indexExpression, + boolean skipUnavailable, Status status, Integer totalShards, Integer successfulShards, @@ -830,6 +858,7 @@ public Cluster( assert status != null : "status of Cluster cannot be null"; this.clusterAlias = clusterAlias; this.indexExpression = indexExpression; + this.skipUnavailable = skipUnavailable; this.status = status; this.totalShards = totalShards; this.successfulShards = successfulShards; @@ -855,7 +884,98 @@ public Cluster(StreamInput in) throws IOException { this.took = new TimeValue(took); } this.timedOut = in.readBoolean(); - this.failures = Collections.unmodifiableList(in.readList(ShardSearchFailure::readShardSearchFailure)); + this.failures = Collections.unmodifiableList(in.readCollectionAsList(ShardSearchFailure::readShardSearchFailure)); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_066)) { + this.skipUnavailable = in.readBoolean(); + } else { + this.skipUnavailable = SKIP_UNAVAILABLE_DEFAULT; + } + } + + /** + * Since the Cluster object is immutable, use this Builder class to create + * a new Cluster object using the "copyFrom" Cluster passed in and set only + * changed values. + * + * Since the clusterAlias, indexExpression and skipUnavailable fields are + * never changed once set, this Builder provides no setter method for them. + * All other fields can be set and override the value in the "copyFrom" Cluster. + */ + public static class Builder { + private Status status; + private Integer totalShards; + private Integer successfulShards; + private Integer skippedShards; + private Integer failedShards; + private List failures; + private TimeValue took; + private Boolean timedOut; + private Cluster original; + + public Builder(Cluster copyFrom) { + this.original = copyFrom; + } + + /** + * @return new Cluster object using the new values passed in via setters + * or the values in the "copyFrom" Cluster object set in the + * Builder constructor. + */ + public Cluster build() { + return new Cluster( + original.getClusterAlias(), + original.getIndexExpression(), + original.isSkipUnavailable(), + status != null ? status : original.getStatus(), + totalShards != null ? totalShards : original.getTotalShards(), + successfulShards != null ? successfulShards : original.getSuccessfulShards(), + skippedShards != null ? skippedShards : original.getSkippedShards(), + failedShards != null ? failedShards : original.getFailedShards(), + failures != null ? failures : original.getFailures(), + took != null ? took : original.getTook(), + timedOut != null ? 
timedOut : original.isTimedOut() + ); + } + + public Builder setStatus(Status status) { + this.status = status; + return this; + } + + public Builder setTotalShards(int totalShards) { + this.totalShards = totalShards; + return this; + } + + public Builder setSuccessfulShards(int successfulShards) { + this.successfulShards = successfulShards; + return this; + } + + public Builder setSkippedShards(int skippedShards) { + this.skippedShards = skippedShards; + return this; + } + + public Builder setFailedShards(int failedShards) { + this.failedShards = failedShards; + return this; + } + + public Builder setFailures(List<ShardSearchFailure> failures) { + this.failures = failures; + return this; + } + + public Builder setTook(TimeValue took) { + this.took = took; + return this; + } + + public Builder setTimedOut(boolean timedOut) { + this.timedOut = timedOut; + return this; + } } @Override @@ -869,7 +989,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalInt(failedShards); out.writeOptionalLong(took == null ? null : took.millis()); out.writeBoolean(timedOut); - out.writeList(failures); + out.writeCollection(failures); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_066)) { + out.writeBoolean(skipUnavailable); + } } @Override @@ -985,10 +1108,12 @@ public static Cluster fromXContent(String clusterAlias, XContentParser parser) t Integer skippedShardsFinal = skippedShards == -1 ? null : skippedShards; Integer failedShardsFinal = failedShards == -1 ? null : failedShards; TimeValue tookTimeValue = took == -1L ? null : new TimeValue(took); + boolean skipUnavailable = SKIP_UNAVAILABLE_DEFAULT; // skipUnavailable is not exposed to XContent, so just use default return new Cluster( clusterName, indexExpression, + skipUnavailable, SearchResponse.Cluster.Status.valueOf(status.toUpperCase(Locale.ROOT)), totalShardsFinal, successfulShardsFinal, @@ -1008,6 +1133,10 @@ public String getIndexExpression() { return indexExpression; } + public boolean isSkipUnavailable() { + return skipUnavailable; + } + public Status getStatus() { return status; } @@ -1043,13 +1172,11 @@ public Integer getFailedShards() { @Override public String toString() { return "Cluster{" - + "clusterAlias='" + + "alias='" + clusterAlias + '\'' + ", status=" + status - + ", failures=" - + failures + ", totalShards=" + totalShards + ", successfulShards=" @@ -1058,8 +1185,17 @@ public String toString() { + skippedShards + ", failedShards=" + failedShards - + ", searchLatencyMillis=" + + ", failures(sz)=" + + failures.size() + + ", took=" + took + + ", timedOut=" + + timedOut + + ", indexExpression='" + + indexExpression + + '\'' + + ", skipUnavailable=" + + skipUnavailable + '}'; } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchShardsGroup.java b/server/src/main/java/org/elasticsearch/action/search/SearchShardsGroup.java index ae2b0cecb7b85..19ba56be5749a 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchShardsGroup.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchShardsGroup.java @@ -49,7 +49,7 @@ public SearchShardsGroup(ShardId shardId, List<String> allocatedNodes, boolean s public SearchShardsGroup(StreamInput in) throws IOException { this.shardId = new ShardId(in); - this.allocatedNodes = in.readStringList(); + this.allocatedNodes = in.readStringCollectionAsList(); this.skipped = in.readBoolean(); this.preFiltered = true; } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchShardsResponse.java
b/server/src/main/java/org/elasticsearch/action/search/SearchShardsResponse.java index 5f34d40e2efb3..71ff3e74fe027 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchShardsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchShardsResponse.java @@ -47,8 +47,8 @@ public SearchShardsResponse( public SearchShardsResponse(StreamInput in) throws IOException { super(in); - this.groups = in.readList(SearchShardsGroup::new); - this.nodes = in.readList(DiscoveryNode::new); + this.groups = in.readCollectionAsList(SearchShardsGroup::new); + this.nodes = in.readCollectionAsList(DiscoveryNode::new); this.aliasFilters = in.readMap(AliasFilter::readFrom); } @@ -56,7 +56,7 @@ public SearchShardsResponse(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeCollection(groups); out.writeCollection(nodes); - out.writeMap(aliasFilters, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + out.writeMap(aliasFilters, StreamOutput::writeWriteable); } /** @@ -109,4 +109,9 @@ static SearchShardsResponse fromLegacyResponse(ClusterSearchShardsResponse oldRe assert groups.stream().noneMatch(SearchShardsGroup::preFiltered) : "legacy responses must not have preFiltered set"; return new SearchShardsResponse(groups, Arrays.asList(oldResp.getNodes()), aliasFilters); } + + @Override + public String toString() { + return "SearchShardsResponse{" + "groups=" + groups + ", nodes=" + nodes + ", aliasFilters=" + aliasFilters + '}'; + } } diff --git a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java index 34cd7e6165c4b..9714fc7574418 100644 --- a/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java +++ b/server/src/main/java/org/elasticsearch/action/search/SearchTransportService.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.search; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; @@ -155,7 +155,7 @@ public void sendCanMatch( SearchTask task, final ActionListener listener ) { - if (connection.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0) + if (connection.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0) && connection.getNode().getVersion().onOrAfter(Version.V_7_16_0)) { transportService.sendChildRequest( connection, diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java index 07fed9598ea8b..9c78e5ad62aea 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportOpenPointInTimeAction.java @@ -21,7 +21,7 @@ import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.util.concurrent.AtomicArray; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.search.SearchPhaseResult; @@ -167,10 +167,29 @@ protected void executePhaseOnShard( @Override protected SearchPhase 
getNextPhase(SearchPhaseResults results, SearchPhaseContext context) { return new SearchPhase(getName()) { + + private void onExecuteFailure(Exception e) { + onPhaseFailure(this, "sending response failed", e); + } + @Override public void run() { - final AtomicArray atomicArray = results.getAtomicArray(); - sendSearchResponse(InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, atomicArray); + execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + onExecuteFailure(e); + } + + @Override + protected void doRun() { + sendSearchResponse(InternalSearchResponse.EMPTY_WITH_TOTAL_HITS, results.getAtomicArray()); + } + + @Override + public boolean isForceExecution() { + return true; // we already created the PIT, no sense in rejecting the task that sends the response. + } + }); } }; } diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java index 2cd828ab25295..4cf07263fc70d 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchAction.java @@ -10,7 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.IndicesRequest; @@ -44,6 +44,7 @@ import org.elasticsearch.common.logging.DeprecationLogger; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; +import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.concurrent.CountDown; import org.elasticsearch.core.Nullable; @@ -90,7 +91,6 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiConsumer; import java.util.function.BiFunction; @@ -256,7 +256,7 @@ private Map resolveIndexBoosts(SearchRequest searchRequest, Clust * to moving backwards due to NTP and other such complexities, etc.). There are also issues with * using a relative clock for reporting real time. Thus, we simply separate these two uses. */ - record SearchTimeProvider(long absoluteStartMillis, long relativeStartNanos, LongSupplier relativeCurrentNanosProvider) { + public record SearchTimeProvider(long absoluteStartMillis, long relativeStartNanos, LongSupplier relativeCurrentNanosProvider) { /** * Instantiates a new search time provider. 
The absolute start time is the real clock time @@ -269,9 +269,9 @@ record SearchTimeProvider(long absoluteStartMillis, long relativeStartNanos, Lon * @param relativeStartNanos the relative start time in nanoseconds * @param relativeCurrentNanosProvider provides the current relative time */ - SearchTimeProvider {} + public SearchTimeProvider {} - long buildTookInMillis() { + public long buildTookInMillis() { return TimeUnit.NANOSECONDS.toMillis(relativeCurrentNanosProvider.getAsLong() - relativeStartNanos); } } @@ -295,6 +295,7 @@ void executeRequest( ); ActionListener rewriteListener = listener.delegateFailureAndWrap((delegate, rewritten) -> { final SearchContextId searchContext; + // key to map is clusterAlias final Map remoteClusterIndices; if (ccsCheckCompatibility) { checkCCSVersionCompatibility(rewritten); @@ -327,17 +328,23 @@ void executeRequest( && rewritten.source().aggregations() != null ? searchService.aggReduceContextBuilder(task::isCancelled, rewritten.source().aggregations()) : null; - SearchResponse.Clusters initClusters = new SearchResponse.Clusters(localIndices, remoteClusterIndices, true); + SearchResponse.Clusters clusters = new SearchResponse.Clusters( + localIndices, + remoteClusterIndices, + true, + alias -> remoteClusterService.isSkipUnavailable(alias) + ); if (localIndices == null) { // Notify the progress listener that a CCS with minimize_roundtrips is happening remote-only (no local shards) - task.getProgressListener().notifyListShards(Collections.emptyList(), Collections.emptyList(), initClusters, false); + task.getProgressListener() + .notifyListShards(Collections.emptyList(), Collections.emptyList(), clusters, false, timeProvider); } ccsRemoteReduce( parentTaskId, rewritten, localIndices, remoteClusterIndices, - initClusters, + clusters, timeProvider, aggregationReduceContextBuilder, remoteClusterService, @@ -349,13 +356,18 @@ void executeRequest( r, localIndices, clusterState, - initClusters, + clusters, searchContext, searchPhaseProvider.apply(l) ) ); } else { - AtomicInteger skippedClusters = new AtomicInteger(0); + SearchResponse.Clusters clusters = new SearchResponse.Clusters( + localIndices, + remoteClusterIndices, + false, + alias -> remoteClusterService.isSkipUnavailable(alias) + ); // TODO: pass parentTaskId collectSearchShards( rewritten.indicesOptions(), @@ -364,8 +376,9 @@ void executeRequest( rewritten.source() != null ? rewritten.source().query() : null, Objects.requireNonNullElse(rewritten.allowPartialSearchResults(), searchService.defaultAllowPartialSearchResults()), searchContext, - skippedClusters, remoteClusterIndices, + clusters, + timeProvider, transportService, delegate.delegateFailureAndWrap((finalDelegate, searchShardsResponses) -> { final BiFunction clusterNodeLookup = getRemoteClusterNodeLookup( @@ -392,9 +405,6 @@ void executeRequest( remoteAliasFilters ); } - int localClusters = localIndices == null ? 
0 : 1; - int totalClusters = remoteClusterIndices.size() + localClusters; - int successfulClusters = searchShardsResponses.size() + localClusters; executeSearch( task, timeProvider, @@ -404,7 +414,7 @@ void executeRequest( clusterNodeLookup, clusterState, remoteAliasFilters, - new SearchResponse.Clusters(totalClusters, successfulClusters, skippedClusters.get()), + clusters, searchContext, searchPhaseProvider.apply(finalDelegate) ); @@ -437,7 +447,7 @@ static void adjustSearchType(SearchRequest searchRequest, boolean singleShard) { } } - static boolean shouldMinimizeRoundtrips(SearchRequest searchRequest) { + public static boolean shouldMinimizeRoundtrips(SearchRequest searchRequest) { if (searchRequest.isCcsMinimizeRoundtrips() == false) { return false; } @@ -551,7 +561,6 @@ public void onFailure(Exception e) { timeProvider, aggReduceContextBuilder ); - AtomicInteger skippedClusters = new AtomicInteger(0); final AtomicReference exceptions = new AtomicReference<>(); int totalClusters = remoteIndices.size() + (localIndices == null ? 0 : 1); final CountDown countDown = new CountDown(totalClusters); @@ -571,7 +580,6 @@ public void onFailure(Exception e) { clusterAlias, skipUnavailable, countDown, - skippedClusters, exceptions, searchResponseMerger, clusters, @@ -589,7 +597,6 @@ public void onFailure(Exception e) { RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, false, countDown, - skippedClusters, exceptions, searchResponseMerger, clusters, @@ -633,6 +640,9 @@ static SearchResponseMerger createSearchResponseMerger( return new SearchResponseMerger(from, size, trackTotalHitsUpTo, timeProvider, aggReduceContextBuilder); } + /** + * Used for ccs_minimize_roundtrips=false + */ static void collectSearchShards( IndicesOptions indicesOptions, String preference, @@ -640,8 +650,9 @@ static void collectSearchShards( QueryBuilder query, boolean allowPartialResults, SearchContextId searchContext, - AtomicInteger skippedClusters, Map remoteIndicesByCluster, + SearchResponse.Clusters clusters, + SearchTimeProvider timeProvider, TransportService transportService, ActionListener> listener ) { @@ -657,14 +668,14 @@ static void collectSearchShards( clusterAlias, skipUnavailable, responsesCountDown, - skippedClusters, exceptions, - null, + clusters.getCluster(clusterAlias), listener ) { @Override void innerOnResponse(SearchShardsResponse searchShardsResponse) { assert ThreadPool.assertCurrentThreadPool(ThreadPool.Names.SEARCH_COORDINATION); + ccsClusterInfoUpdate(searchShardsResponse, cluster, timeProvider); searchShardsResponses.put(clusterAlias, searchShardsResponse); } @@ -680,7 +691,7 @@ Map createFinalResponse() { final String[] indices = entry.getValue().indices(); final Executor responseExecutor = transportService.getThreadPool().executor(ThreadPool.Names.SEARCH_COORDINATION); // TODO: support point-in-time - if (searchContext == null && connection.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (searchContext == null && connection.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { SearchShardsRequest searchShardsRequest = new SearchShardsRequest( indices, indicesOptions, @@ -698,6 +709,7 @@ Map createFinalResponse() { new ActionListenerResponseHandler<>(singleListener, SearchShardsResponse::new, responseExecutor) ); } else { + // does not do a can-match ClusterSearchShardsRequest searchShardsRequest = new ClusterSearchShardsRequest(indices).indicesOptions( indicesOptions ).local(true).preference(preference).routing(routing); @@ -725,7 +737,6 @@ private static 
ActionListener<SearchResponse> createCCSListener( String clusterAlias, boolean skipUnavailable, CountDown countDown, - AtomicInteger skippedClusters, AtomicReference<Exception> exceptions, SearchResponseMerger searchResponseMerger, SearchResponse.Clusters clusters, @@ -735,7 +746,6 @@ private static ActionListener<SearchResponse> createCCSListener( clusterAlias, skipUnavailable, countDown, - skippedClusters, exceptions, clusters.getCluster(clusterAlias), originalListener @@ -772,16 +782,9 @@ static void ccsClusterInfoUpdate( boolean swapped; do { SearchResponse.Cluster orig = clusterRef.get(); - String clusterAlias = orig.getClusterAlias(); - List<ShardSearchFailure> failures; - if (orig.getFailures() != null) { - failures = new ArrayList<>(orig.getFailures()); - } else { - failures = new ArrayList<>(1); - } - failures.add(failure); - String indexExpression = orig.getIndexExpression(); - SearchResponse.Cluster updated = new SearchResponse.Cluster(clusterAlias, indexExpression, status, failures); + // returns unmodifiable list based on the original one passed plus the appended failure + List<ShardSearchFailure> failures = CollectionUtils.appendToCopy(orig.getFailures(), failure); + SearchResponse.Cluster updated = new SearchResponse.Cluster.Builder(orig).setStatus(status).setFailures(failures).build(); swapped = clusterRef.compareAndSet(orig, updated); } while (swapped == false); } @@ -824,22 +827,51 @@ private static void ccsClusterInfoUpdate( boolean swapped; do { SearchResponse.Cluster orig = clusterRef.get(); - SearchResponse.Cluster updated = new SearchResponse.Cluster( - orig.getClusterAlias(), - orig.getIndexExpression(), - status, - searchResponse.getTotalShards(), - searchResponse.getSuccessfulShards(), - searchResponse.getSkippedShards(), - searchResponse.getFailedShards(), - Arrays.asList(searchResponse.getShardFailures()), - searchResponse.getTook(), - searchResponse.isTimedOut() - ); + SearchResponse.Cluster updated = new SearchResponse.Cluster.Builder(orig).setStatus(status) + .setTotalShards(searchResponse.getTotalShards()) + .setSuccessfulShards(searchResponse.getSuccessfulShards()) + .setSkippedShards(searchResponse.getSkippedShards()) + .setFailedShards(searchResponse.getFailedShards()) + .setFailures(Arrays.asList(searchResponse.getShardFailures())) + .setTook(searchResponse.getTook()) + .setTimedOut(searchResponse.isTimedOut()) + .build(); swapped = clusterRef.compareAndSet(orig, updated); } while (swapped == false); } + /** + * Edge case: + * Typically we don't need to update a Cluster object after the SearchShards API call, since the + * skipped shards will be passed into SearchProgressListener.onListShards. + * However, there is an edge case where the remote SearchShards API call returns no shards at all. + * So in that case, nothing for this cluster will be passed to onListShards, so we need to update + * the Cluster object to SUCCESSFUL status with shard counts of 0 and a filled in 'took' value.
+ * + * @param response the response from the SearchShards API call to the remote cluster + * @param clusterRef reference to the Cluster object to be updated + * @param timeProvider search time provider (for setting took value) + */ + private static void ccsClusterInfoUpdate( + SearchShardsResponse response, + AtomicReference<SearchResponse.Cluster> clusterRef, + SearchTimeProvider timeProvider + ) { + if (response.getGroups().isEmpty()) { + clusterRef.updateAndGet( + orig -> new SearchResponse.Cluster.Builder(orig).setStatus(SearchResponse.Cluster.Status.SUCCESSFUL) + .setTotalShards(0) + .setSuccessfulShards(0) + .setSkippedShards(0) + .setFailedShards(0) + .setFailures(Collections.emptyList()) + .setTook(new TimeValue(timeProvider.buildTookInMillis())) + .setTimedOut(false) + .build() + ); + } + } + void executeLocalSearch( Task task, SearchTimeProvider timeProvider, @@ -1231,6 +1263,14 @@ public SearchPhase newSearchPhase( }) ); } else { + // for synchronous CCS minimize_roundtrips=false, use the CCSSingleCoordinatorSearchProgressListener + // (AsyncSearchTask will not return SearchProgressListener.NOOP, since it uses its own progress listener + // which delegates to CCSSingleCoordinatorSearchProgressListener when minimizing roundtrips) + if (clusters.isCcsMinimizeRoundtrips() == false + && clusters.hasRemoteClusters() + && task.getProgressListener() == SearchProgressListener.NOOP) { + task.setProgressListener(new CCSSingleCoordinatorSearchProgressListener()); + } final QueryPhaseResultConsumer queryResultConsumer = searchPhaseController.newSearchPhaseResults( executor, circuitBreaker, @@ -1367,7 +1407,6 @@ abstract static class CCSActionListener<Response, FinalResponse> implements ActionListener<Response> protected final String clusterAlias; protected final boolean skipUnavailable; private final CountDown countDown; - private final AtomicInteger skippedClusters; private final AtomicReference<Exception> exceptions; protected final AtomicReference<SearchResponse.Cluster> cluster; private final ActionListener<FinalResponse> originalListener; @@ -1380,7 +1419,6 @@ abstract static class CCSActionListener<Response, FinalResponse> implements ActionListener<Response> String clusterAlias, boolean skipUnavailable, CountDown countDown, - AtomicInteger skippedClusters, AtomicReference<Exception> exceptions, @Nullable AtomicReference<SearchResponse.Cluster> cluster, // null for ccs_minimize_roundtrips=false ActionListener<FinalResponse> originalListener @@ -1388,7 +1426,6 @@ abstract static class CCSActionListener<Response, FinalResponse> implements ActionListener<Response> this.clusterAlias = clusterAlias; this.skipUnavailable = skipUnavailable; this.countDown = countDown; - this.skippedClusters = skippedClusters; this.exceptions = exceptions; this.cluster = cluster; this.originalListener = originalListener; @@ -1411,7 +1448,7 @@ public final void onFailure(Exception e) { if (cluster != null) { ccsClusterInfoUpdate(f, cluster, skipUnavailable); } - skippedClusters.incrementAndGet(); + // the skipped count is now derived from the Cluster objects updated above, so no separate counter is needed } else { if (cluster != null) { ccsClusterInfoUpdate(f, cluster, skipUnavailable); @@ -1506,12 +1543,12 @@ static List<SearchShardIterator> getLocalLocalShardsIteratorFromPointInTime( if (Strings.isEmpty(perNode.getClusterAlias())) { final ShardId shardId = entry.getKey(); final List<String> targetNodes = new ArrayList<>(2); - // Prefer executing shard requests on nodes that are part of PIT first. - if (clusterState.nodes().nodeExists(perNode.getNode())) { - targetNodes.add(perNode.getNode()); - } try { final ShardIterator shards = OperationRouting.getShards(clusterState, shardId); + // Prefer executing shard requests on nodes that are part of PIT first.
+ if (clusterState.nodes().nodeExists(perNode.getNode())) { + targetNodes.add(perNode.getNode()); + } if (perNode.getSearchContextId().getSearcherId() != null) { for (ShardRouting shard : shards) { if (shard.currentNodeId().equals(perNode.getNode()) == false) { diff --git a/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java b/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java index 5680a4525b468..632fbafa0536b 100644 --- a/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java +++ b/server/src/main/java/org/elasticsearch/action/search/TransportSearchHelper.java @@ -8,9 +8,13 @@ package org.elasticsearch.action.search; -import org.elasticsearch.TransportVersion; -import org.elasticsearch.common.io.stream.ByteArrayStreamInput; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.InputStreamStreamInput; +import org.elasticsearch.common.io.stream.OutputStreamStreamOutput; +import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.VersionCheckingStreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.concurrent.AtomicArray; @@ -21,8 +25,10 @@ import org.elasticsearch.search.internal.ShardSearchContextId; import org.elasticsearch.transport.RemoteClusterAware; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.UncheckedIOException; +import java.nio.charset.StandardCharsets; import java.util.Base64; public final class TransportSearchHelper { @@ -34,32 +40,40 @@ static InternalScrollSearchRequest internalScrollSearchRequest(ShardSearchContex } static String buildScrollId(AtomicArray searchPhaseResults) { - try { - BytesStreamOutput out = new BytesStreamOutput(); - out.writeString(INCLUDE_CONTEXT_UUID); - out.writeString(searchPhaseResults.length() == 1 ? ParsedScrollId.QUERY_AND_FETCH_TYPE : ParsedScrollId.QUERY_THEN_FETCH_TYPE); - out.writeCollection(searchPhaseResults.asList(), (o, searchPhaseResult) -> { - o.writeString(searchPhaseResult.getContextId().getSessionId()); - o.writeLong(searchPhaseResult.getContextId().getId()); - SearchShardTarget searchShardTarget = searchPhaseResult.getSearchShardTarget(); - if (searchShardTarget.getClusterAlias() != null) { - o.writeString( - RemoteClusterAware.buildRemoteIndexName(searchShardTarget.getClusterAlias(), searchShardTarget.getNodeId()) - ); - } else { - o.writeString(searchShardTarget.getNodeId()); - } - }); - return Base64.getUrlEncoder().encodeToString(out.copyBytes().array()); + final BytesReference bytesReference; + try (var encodedStreamOutput = new BytesStreamOutput()) { + try (var out = new OutputStreamStreamOutput(Base64.getUrlEncoder().wrap(encodedStreamOutput))) { + out.writeString(INCLUDE_CONTEXT_UUID); + out.writeString( + searchPhaseResults.length() == 1 ? 
ParsedScrollId.QUERY_AND_FETCH_TYPE : ParsedScrollId.QUERY_THEN_FETCH_TYPE + ); + out.writeCollection(searchPhaseResults.asList(), (o, searchPhaseResult) -> { + o.writeString(searchPhaseResult.getContextId().getSessionId()); + o.writeLong(searchPhaseResult.getContextId().getId()); + SearchShardTarget searchShardTarget = searchPhaseResult.getSearchShardTarget(); + if (searchShardTarget.getClusterAlias() != null) { + o.writeString( + RemoteClusterAware.buildRemoteIndexName(searchShardTarget.getClusterAlias(), searchShardTarget.getNodeId()) + ); + } else { + o.writeString(searchShardTarget.getNodeId()); + } + }); + } + bytesReference = encodedStreamOutput.bytes(); } catch (IOException e) { + assert false : e; throw new UncheckedIOException(e); } + final BytesRef bytesRef = bytesReference.toBytesRef(); + return new String(bytesRef.bytes, bytesRef.offset, bytesRef.length, StandardCharsets.ISO_8859_1); } static ParsedScrollId parseScrollId(String scrollId) { - try { - byte[] bytes = Base64.getUrlDecoder().decode(scrollId); - ByteArrayStreamInput in = new ByteArrayStreamInput(bytes); + try ( + var decodedInputStream = Base64.getUrlDecoder().wrap(new ByteArrayInputStream(scrollId.getBytes(StandardCharsets.ISO_8859_1))); + var in = new InputStreamStreamInput(decodedInputStream) + ) { final boolean includeContextUUID; final String type; final String firstChunk = in.readString(); @@ -70,22 +84,13 @@ static ParsedScrollId parseScrollId(String scrollId) { includeContextUUID = false; type = firstChunk; } - SearchContextIdForNode[] context = new SearchContextIdForNode[in.readVInt()]; - for (int i = 0; i < context.length; ++i) { - final String contextUUID = includeContextUUID ? in.readString() : ""; - long id = in.readLong(); - String target = in.readString(); - String clusterAlias; - final int index = target.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR); - if (index == -1) { - clusterAlias = null; - } else { - clusterAlias = target.substring(0, index); - target = target.substring(index + 1); - } - context[i] = new SearchContextIdForNode(clusterAlias, target, new ShardSearchContextId(contextUUID, id)); - } - if (in.getPosition() != bytes.length) { + final SearchContextIdForNode[] context = in.readArray( + includeContextUUID + ? 
TransportSearchHelper::readSearchContextIdForNodeIncludingContextUUID + : TransportSearchHelper::readSearchContextIdForNodeExcludingContextUUID, + SearchContextIdForNode[]::new + ); + if (in.available() > 0) { throw new IllegalArgumentException("Not all bytes were read"); } return new ParsedScrollId(scrollId, type, context); @@ -94,6 +99,28 @@ static ParsedScrollId parseScrollId(String scrollId) { } } + private static SearchContextIdForNode readSearchContextIdForNodeIncludingContextUUID(StreamInput in) throws IOException { + return innerReadSearchContextIdForNode(in.readString(), in); + } + + private static SearchContextIdForNode readSearchContextIdForNodeExcludingContextUUID(StreamInput in) throws IOException { + return innerReadSearchContextIdForNode("", in); + } + + private static SearchContextIdForNode innerReadSearchContextIdForNode(String contextUUID, StreamInput in) throws IOException { + long id = in.readLong(); + String target = in.readString(); + String clusterAlias; + final int index = target.indexOf(RemoteClusterAware.REMOTE_CLUSTER_INDEX_SEPARATOR); + if (index == -1) { + clusterAlias = null; + } else { + clusterAlias = target.substring(0, index); + target = target.substring(index + 1); + } + return new SearchContextIdForNode(clusterAlias, target, new ShardSearchContextId(contextUUID, id)); + } + /** * Using the 'search.check_ccs_compatibility' setting, clients can ask for an early * check that inspects the incoming request and tries to verify that it can be handled by @@ -105,14 +132,14 @@ static ParsedScrollId parseScrollId(String scrollId) { */ public static void checkCCSVersionCompatibility(Writeable writeableRequest) { try { - writeableRequest.writeTo(new VersionCheckingStreamOutput(TransportVersion.MINIMUM_CCS_VERSION)); + writeableRequest.writeTo(new VersionCheckingStreamOutput(TransportVersions.MINIMUM_CCS_VERSION)); } catch (Exception e) { // if we cannot serialize, raise this as an error to indicate to the caller that CCS has problems with this request throw new IllegalArgumentException( "[" + writeableRequest.getClass() + "] is not compatible with version " - + TransportVersion.MINIMUM_CCS_VERSION + + TransportVersions.MINIMUM_CCS_VERSION + " and the '" + SearchService.CCS_VERSION_CHECK_SETTING.getKey() + "' setting is enabled.", @@ -121,7 +148,5 @@ public static void checkCCSVersionCompatibility(Writeable writeableRequest) { } } - private TransportSearchHelper() { - - } + private TransportSearchHelper() {} } diff --git a/server/src/main/java/org/elasticsearch/action/support/ListenerTimeouts.java b/server/src/main/java/org/elasticsearch/action/support/ListenerTimeouts.java index 98c19fa5174dc..a4e2121c56aaf 100644 --- a/server/src/main/java/org/elasticsearch/action/support/ListenerTimeouts.java +++ b/server/src/main/java/org/elasticsearch/action/support/ListenerTimeouts.java @@ -14,6 +14,7 @@ import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; @@ -35,7 +36,7 @@ public static ActionListener wrapWithTimeout( ThreadPool threadPool, ActionListener listener, TimeValue timeout, - String executor, + Executor executor, String listenerName ) { return wrapWithTimeout(threadPool, timeout, executor, listener, (ignore) -> { @@ -58,7 +59,7 @@ public static ActionListener wrapWithTimeout( public static ActionListener wrapWithTimeout( ThreadPool threadPool, TimeValue timeout, - String executor, + Executor executor, 
ActionListener listener, Consumer> onTimeout ) { diff --git a/server/src/main/java/org/elasticsearch/action/support/RetryableAction.java b/server/src/main/java/org/elasticsearch/action/support/RetryableAction.java index 908d08b70a800..3e5114ddcfa9a 100644 --- a/server/src/main/java/org/elasticsearch/action/support/RetryableAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/RetryableAction.java @@ -18,6 +18,7 @@ import org.elasticsearch.threadpool.ThreadPool; import java.util.ArrayDeque; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicBoolean; import static org.elasticsearch.core.Strings.format; @@ -39,42 +40,21 @@ public abstract class RetryableAction { private final long timeoutMillis; private final long startMillis; private final ActionListener finalListener; - private final String executor; + private final Executor executor; private volatile Scheduler.ScheduledCancellable retryTask; - public RetryableAction( - Logger logger, - ThreadPool threadPool, - TimeValue initialDelay, - TimeValue timeoutValue, - ActionListener listener - ) { - this(logger, threadPool, initialDelay, timeoutValue, listener, ThreadPool.Names.SAME); - } - public RetryableAction( Logger logger, ThreadPool threadPool, TimeValue initialDelay, TimeValue timeoutValue, ActionListener listener, - String executor + Executor executor ) { this(logger, threadPool, initialDelay, TimeValue.MAX_VALUE, timeoutValue, listener, executor); } - public RetryableAction( - Logger logger, - ThreadPool threadPool, - TimeValue initialDelay, - TimeValue maxDelayBound, - TimeValue timeoutValue, - ActionListener listener - ) { - this(logger, threadPool, initialDelay, maxDelayBound, timeoutValue, listener, ThreadPool.Names.SAME); - } - public RetryableAction( Logger logger, ThreadPool threadPool, @@ -82,7 +62,7 @@ public RetryableAction( TimeValue maxDelayBound, TimeValue timeoutValue, ActionListener listener, - String executor + Executor executor ) { this.logger = logger; this.threadPool = threadPool; @@ -106,7 +86,7 @@ public RetryableAction( public void run() { final RetryingListener retryingListener = new RetryingListener(initialDelayMillis, null); final Runnable runnable = createRunnable(retryingListener); - threadPool.executor(executor).execute(runnable); + executor.execute(runnable); } public void cancel(Exception e) { diff --git a/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java b/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java index 1cc8226e994ff..96b54a951ccc9 100644 --- a/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java +++ b/server/src/main/java/org/elasticsearch/action/support/SubscribableListener.java @@ -305,14 +305,14 @@ public void complete(ActionListener listener) { * work. For instance, it could check that the race is not lost by calling {@link #isDone} whenever appropriate, or it could subscribe * another listener which performs any necessary cleanup steps. 
*/ - public void addTimeout(TimeValue timeout, ThreadPool threadPool, String timeoutExecutor) { + public void addTimeout(TimeValue timeout, ThreadPool threadPool, Executor timeoutExecutor) { if (isDone()) { return; } addListener(ActionListener.running(scheduleTimeout(timeout, threadPool, timeoutExecutor))); } - private Runnable scheduleTimeout(TimeValue timeout, ThreadPool threadPool, String timeoutExecutor) { + private Runnable scheduleTimeout(TimeValue timeout, ThreadPool threadPool, Executor timeoutExecutor) { try { final var cancellable = threadPool.schedule( () -> onFailure(new ElasticsearchTimeoutException(Strings.format("timed out after [%s/%dms]", timeout, timeout.millis()))), diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java index e66785d5453f5..15c28bdf33c87 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/node/TransportBroadcastByNodeAction.java @@ -469,7 +469,7 @@ class NodeRequest extends TransportRequest implements IndicesRequest { NodeRequest(StreamInput in) throws IOException { super(in); indicesLevelRequest = readRequestFrom(in); - shards = in.readList(ShardRouting::new); + shards = in.readCollectionAsList(ShardRouting::new); nodeId = in.readString(); } @@ -507,7 +507,7 @@ public void writeTo(StreamOutput out) throws IOException { assert indicesLevelRequest.hasReferences(); super.writeTo(out); indicesLevelRequest.writeTo(out); - out.writeList(shards); + out.writeCollection(shards); out.writeString(nodeId); } @@ -552,9 +552,9 @@ class NodeResponse extends TransportResponse { super(in); nodeId = in.readString(); totalShards = in.readVInt(); - results = in.readList((stream) -> stream.readBoolean() ? readShardResult(stream) : null); + results = in.readCollectionAsList((stream) -> stream.readBoolean() ? 
readShardResult(stream) : null); if (in.readBoolean()) { - exceptions = in.readList(BroadcastShardOperationFailedException::new); + exceptions = in.readCollectionAsList(BroadcastShardOperationFailedException::new); } else { exceptions = null; } @@ -599,7 +599,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(results, StreamOutput::writeOptionalWriteable); out.writeBoolean(exceptions != null); if (exceptions != null) { - out.writeList(exceptions); + out.writeCollection(exceptions); } } } diff --git a/server/src/main/java/org/elasticsearch/action/support/broadcast/unpromotable/BroadcastUnpromotableRequest.java b/server/src/main/java/org/elasticsearch/action/support/broadcast/unpromotable/BroadcastUnpromotableRequest.java index 64c6b3e98bc8b..bf8376cfc5481 100644 --- a/server/src/main/java/org/elasticsearch/action/support/broadcast/unpromotable/BroadcastUnpromotableRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/broadcast/unpromotable/BroadcastUnpromotableRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.support.broadcast.unpromotable; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -46,7 +46,7 @@ public BroadcastUnpromotableRequest(StreamInput in) throws IOException { indexShardRoutingTable = null; shardId = new ShardId(in); indices = new String[] { shardId.getIndex().getName() }; - failShardOnError = in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010) && in.readBoolean(); + failShardOnError = in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) && in.readBoolean(); } public BroadcastUnpromotableRequest(IndexShardRoutingTable indexShardRoutingTable) { @@ -77,7 +77,7 @@ public ActionRequestValidationException validate() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeWriteable(shardId); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeBoolean(failShardOnError); } } diff --git a/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java b/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java index 8e80444cc125c..22f0da70137af 100644 --- a/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java +++ b/server/src/main/java/org/elasticsearch/action/support/master/info/ClusterInfoRequest.java @@ -8,7 +8,7 @@ package org.elasticsearch.action.support.master.info; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.MasterNodeReadRequest; @@ -31,7 +31,7 @@ public ClusterInfoRequest() {} public ClusterInfoRequest(StreamInput in) throws IOException { super(in); indices = in.readStringArray(); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { in.readStringArray(); } indicesOptions = IndicesOptions.readIndicesOptions(in); @@ -41,7 +41,7 @@ public ClusterInfoRequest(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); 
out.writeStringArray(indices); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeStringArray(Strings.EMPTY_ARRAY); } indicesOptions.writeIndicesOptions(out); diff --git a/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesResponse.java b/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesResponse.java index 263ff462c9e5f..46290fbffb8ca 100644 --- a/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesResponse.java +++ b/server/src/main/java/org/elasticsearch/action/support/nodes/BaseNodesResponse.java @@ -31,7 +31,7 @@ protected BaseNodesResponse(StreamInput in) throws IOException { super(in); clusterName = new ClusterName(in); nodes = readNodesFrom(in); - failures = in.readList(FailedNodeException::new); + failures = in.readCollectionAsList(FailedNodeException::new); } protected BaseNodesResponse(ClusterName clusterName, List nodes, List failures) { @@ -97,7 +97,7 @@ public Map getNodesMap() { public void writeTo(StreamOutput out) throws IOException { clusterName.writeTo(out); writeNodesTo(out, nodes); - out.writeList(failures); + out.writeCollection(failures); } /** diff --git a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java index 17c2a68f21332..0b4d90a896c10 100644 --- a/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java +++ b/server/src/main/java/org/elasticsearch/action/support/replication/ReplicationOperation.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; @@ -293,7 +294,8 @@ public String toString() { threadPool, initialRetryBackoffBound, retryTimeout, - replicationListener + replicationListener, + EsExecutors.DIRECT_EXECUTOR_SERVICE ) { @Override diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java index 150545f51b14e..b631d30cfd8bb 100644 --- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java +++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java @@ -9,7 +9,7 @@ package org.elasticsearch.action.termvectors; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; @@ -41,7 +41,7 @@ public Failure(String index, String id, Exception cause) { public Failure(StreamInput in) throws IOException { index = in.readString(); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { // types no longer relevant so ignore String type = in.readOptionalString(); if (type != null) { @@ -76,7 +76,7 @@ public Exception getCause() { @Override public void writeTo(StreamOutput out) 
diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java
index 150545f51b14e..b631d30cfd8bb 100644
--- a/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/termvectors/MultiTermVectorsResponse.java
@@ -9,7 +9,7 @@
 package org.elasticsearch.action.termvectors;
 
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -41,7 +41,7 @@ public Failure(String index, String id, Exception cause) {
 
         public Failure(StreamInput in) throws IOException {
             index = in.readString();
-            if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+            if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) {
                 // types no longer relevant so ignore
                 String type = in.readOptionalString();
                 if (type != null) {
@@ -76,7 +76,7 @@ public Exception getCause() {
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeString(index);
-            if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+            if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) {
                 // types not supported so send an empty array to previous versions
                 out.writeOptionalString(null);
             }
diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java
index fb5f1f6ee1348..6a1f67a821c7c 100644
--- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsRequest.java
@@ -9,7 +9,7 @@
 package org.elasticsearch.action.termvectors;
 
 import org.elasticsearch.ElasticsearchParseException;
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.RealtimeRequest;
 import org.elasticsearch.action.ValidateActions;
@@ -128,7 +128,7 @@ public TermVectorsRequest() {}
 
     TermVectorsRequest(StreamInput in) throws IOException {
         super(in);
-        if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+        if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) {
             // types no longer relevant so ignore
             in.readString();
         }
@@ -477,7 +477,7 @@ public ActionRequestValidationException validate() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+        if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) {
             // types not supported so send an empty array to previous versions
             out.writeString("_doc");
         }
diff --git a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java
index 82174485d8292..e0c9b86e4139f 100644
--- a/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java
+++ b/server/src/main/java/org/elasticsearch/action/termvectors/TermVectorsResponse.java
@@ -16,7 +16,7 @@
 import org.apache.lucene.util.ArrayUtil;
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.CharsRefBuilder;
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.termvectors.TermVectorsRequest.Flag;
 import org.elasticsearch.common.bytes.BytesArray;
@@ -91,7 +91,7 @@ public TermVectorsResponse(String index, String id) {
 
     TermVectorsResponse(StreamInput in) throws IOException {
         index = in.readString();
-        if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+        if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) {
             // types no longer relevant so ignore
             in.readString();
         }
@@ -109,7 +109,7 @@ public TermVectorsResponse(String index, String id) {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(index);
-        if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+        if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) {
             // types not supported so send an empty array to previous versions
             out.writeString(MapperService.SINGLE_MAPPING_NAME);
         }
diff --git a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
index 6801bf2c0fd10..600790b2fd841 100644
--- a/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
+++ b/server/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
@@ -9,7 +9,7 @@
 package org.elasticsearch.action.update;
 
 import org.apache.lucene.util.RamUsageEstimator;
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.DocWriteRequest;
 import org.elasticsearch.action.index.IndexRequest;
@@ -136,7 +136,7 @@ public UpdateRequest(StreamInput in) throws IOException {
     public UpdateRequest(@Nullable ShardId shardId, StreamInput in) throws IOException {
         super(shardId, in);
         waitForActiveShards = ActiveShardCount.readFrom(in);
-        if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+        if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) {
             String type = in.readString();
             assert MapperService.SINGLE_MAPPING_NAME.equals(type) : "Expected [_doc] but received [" + type + "]";
         }
@@ -159,7 +159,7 @@ public UpdateRequest(@Nullable ShardId shardId, StreamInput in) throws IOException {
         ifPrimaryTerm = in.readVLong();
         detectNoop = in.readBoolean();
         scriptedUpsert = in.readBoolean();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) {
             requireAlias = in.readBoolean();
         } else {
             requireAlias = false;
@@ -860,7 +860,7 @@ public void writeThin(StreamOutput out) throws IOException {
 
     private void doWrite(StreamOutput out, boolean thin) throws IOException {
         waitForActiveShards.writeTo(out);
-        if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+        if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) {
             out.writeString(MapperService.SINGLE_MAPPING_NAME);
         }
         out.writeString(id);
@@ -905,7 +905,7 @@ private void doWrite(StreamOutput out, boolean thin) throws IOException {
         out.writeVLong(ifPrimaryTerm);
         out.writeBoolean(detectNoop);
         out.writeBoolean(scriptedUpsert);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) {
             out.writeBoolean(requireAlias);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java
index 59d2a94a21c92..0c165468dfba5 100644
--- a/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java
+++ b/server/src/main/java/org/elasticsearch/bootstrap/Elasticsearch.java
@@ -26,6 +26,7 @@
 import org.elasticsearch.common.settings.SecureSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.BoundTransportAddress;
+import org.elasticsearch.common.util.concurrent.RunOnce;
 import org.elasticsearch.core.AbstractRefCounted;
 import org.elasticsearch.core.IOUtils;
 import org.elasticsearch.core.SuppressForbidden;
@@ -188,6 +189,7 @@ private static void initPhase2(Bootstrap bootstrap) throws IOException {
             // The following classes use MethodHandles.lookup during initialization, load them now (before SM) to be sure they succeed
             AbstractRefCounted.class,
             SubscribableListener.class,
+            RunOnce.class,
             // We eagerly initialize to work around log4j permissions & JDK-8309727
             VectorUtil.class
         );
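RunOnce appears twice in this patch: preloaded above because its initializer uses MethodHandles.lookup, and rewritten to a method reference in MappingUpdatedAction further down. Its contract, sketched with a semaphore as a stand-in resource:

import java.util.concurrent.Semaphore;

import org.elasticsearch.common.util.concurrent.RunOnce;

public static void main(String[] args) throws InterruptedException {
    Semaphore semaphore = new Semaphore(1);
    semaphore.acquire();
    RunOnce release = new RunOnce(semaphore::release); // same as new RunOnce(() -> semaphore.release())
    release.run(); // releases the permit
    release.run(); // no-op: RunOnce guarantees at-most-once execution of the wrapped Runnable
}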
diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java b/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java
index be44b1ec3ebb6..dba181d8e8159 100644
--- a/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java
+++ b/server/src/main/java/org/elasticsearch/cluster/ClusterInfo.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.cluster;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.cluster.routing.RecoverySource;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
@@ -48,8 +49,8 @@ public class ClusterInfo implements ChunkedToXContent, Writeable {
 
     public static final ClusterInfo EMPTY = new ClusterInfo();
 
-    public static final TransportVersion DATA_SET_SIZE_SIZE_VERSION = TransportVersion.V_7_13_0;
-    public static final TransportVersion DATA_PATH_NEW_KEY_VERSION = TransportVersion.V_8_6_0;
+    public static final TransportVersion DATA_SET_SIZE_SIZE_VERSION = TransportVersions.V_7_13_0;
+    public static final TransportVersion DATA_PATH_NEW_KEY_VERSION = TransportVersions.V_8_6_0;
 
     private final Map leastAvailableSpaceUsage;
     private final Map mostAvailableSpaceUsage;
@@ -106,14 +107,14 @@ public ClusterInfo(StreamInput in) throws IOException {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeMap(this.leastAvailableSpaceUsage, StreamOutput::writeString, (o, v) -> v.writeTo(o));
-        out.writeMap(this.mostAvailableSpaceUsage, StreamOutput::writeString, (o, v) -> v.writeTo(o));
-        out.writeMap(this.shardSizes, StreamOutput::writeString, (o, v) -> o.writeLong(v == null ? -1 : v));
+        out.writeMap(this.leastAvailableSpaceUsage, StreamOutput::writeWriteable);
+        out.writeMap(this.mostAvailableSpaceUsage, StreamOutput::writeWriteable);
+        out.writeMap(this.shardSizes, (o, v) -> o.writeLong(v == null ? -1 : v));
         if (out.getTransportVersion().onOrAfter(DATA_SET_SIZE_SIZE_VERSION)) {
-            out.writeMap(this.shardDataSetSizes, (o, s) -> s.writeTo(o), StreamOutput::writeLong);
+            out.writeMap(this.shardDataSetSizes, StreamOutput::writeWriteable, StreamOutput::writeLong);
         }
         if (out.getTransportVersion().onOrAfter(DATA_PATH_NEW_KEY_VERSION)) {
-            out.writeMap(this.dataPath, (o, k) -> k.writeTo(o), StreamOutput::writeString);
+            out.writeMap(this.dataPath, StreamOutput::writeWriteable, StreamOutput::writeString);
         } else {
             out.writeMap(this.dataPath, (o, k) -> createFakeShardRoutingFromNodeAndShard(k).writeTo(o), StreamOutput::writeString);
         }
@@ -358,7 +359,7 @@ public record ReservedSpace(long total, Set shardIds) implements Writeable {
 
         public static final ReservedSpace EMPTY = new ReservedSpace(0, new HashSet<>());
 
         ReservedSpace(StreamInput in) throws IOException {
-            this(in.readVLong(), in.readSet(ShardId::new));
+            this(in.readVLong(), in.readCollectionAsSet(ShardId::new));
         }
 
     @Override
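The writeMap rewrites above lean on two StreamOutput conveniences: a two-argument overload that writes String keys implicitly, and the writeWriteable method reference replacing (o, v) -> v.writeTo(o) lambdas. Sketched against a hypothetical writeTo with assumed field types:

// assumed fields, for illustration only: Map<String, DiskUsage> usageByNode; Map<ShardId, Long> sizeByShard
@Override
public void writeTo(StreamOutput out) throws IOException {
    // String keys are implied, so only the value writer is passed:
    out.writeMap(usageByNode, StreamOutput::writeWriteable);
    // explicit key and value writers for non-String keys:
    out.writeMap(sizeByShard, StreamOutput::writeWriteable, StreamOutput::writeLong);
}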
diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterSnapshotStats.java b/server/src/main/java/org/elasticsearch/cluster/ClusterSnapshotStats.java
index b656759b514ed..98dfc48dd3cd0 100644
--- a/server/src/main/java/org/elasticsearch/cluster/ClusterSnapshotStats.java
+++ b/server/src/main/java/org/elasticsearch/cluster/ClusterSnapshotStats.java
@@ -175,7 +175,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeVInt(incompleteShardSnapshotCount);
         out.writeVInt(deletionsInProgressCount);
         out.writeVInt(cleanupsInProgressCount);
-        out.writeList(statsByRepository);
+        out.writeCollection(statsByRepository);
     }
 
     public static ClusterSnapshotStats readFrom(StreamInput in) throws IOException {
@@ -184,7 +184,7 @@ public static ClusterSnapshotStats readFrom(StreamInput in) throws IOException {
             in.readVInt(),
             in.readVInt(),
             in.readVInt(),
-            in.readList(PerRepositoryStats::readFrom)
+            in.readCollectionAsList(PerRepositoryStats::readFrom)
         );
     }
diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java
index 09170706c738b..6ca8fe26edd54 100644
--- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java
+++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.cluster;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
 import org.elasticsearch.cluster.block.ClusterBlock;
@@ -27,6 +28,7 @@
 import org.elasticsearch.cluster.service.ClusterApplierService;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.cluster.service.MasterService;
+import org.elasticsearch.cluster.version.CompatibilityVersions;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.UUIDs;
@@ -48,7 +50,6 @@
 
 import java.io.IOException;
 import java.util.Collections;
-import java.util.Comparator;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Iterator;
@@ -128,16 +129,16 @@ default boolean isPrivate() {
 
     private static final NamedDiffableValueSerializer CUSTOM_VALUE_SERIALIZER = new NamedDiffableValueSerializer<>(Custom.class);
 
-    private static final DiffableUtils.ValueSerializer TRANSPORT_VERSION_VALUE_SERIALIZER =
+    private static final DiffableUtils.ValueSerializer COMPATIBILITY_VERSIONS_VALUE_SERIALIZER =
         new DiffableUtils.NonDiffableValueSerializer<>() {
             @Override
-            public void write(TransportVersion value, StreamOutput out) throws IOException {
-                TransportVersion.writeVersion(value, out);
+            public void write(CompatibilityVersions value, StreamOutput out) throws IOException {
+                TransportVersion.writeVersion(value.transportVersion(), out);
             }
 
             @Override
-            public TransportVersion read(StreamInput in, String key) throws IOException {
-                return TransportVersion.readVersion(in);
+            public CompatibilityVersions read(StreamInput in, String key) throws IOException {
+                return new CompatibilityVersions(TransportVersion.readVersion(in));
             }
         };
@@ -163,8 +164,8 @@ public TransportVersion read(StreamInput in, String key) throws IOException {
 
     private final DiscoveryNodes nodes;
 
-    private final Map transportVersions;
-    private final TransportVersion minTransportVersion;
+    private final Map compatibilityVersions;
+    private final CompatibilityVersions minVersions;
 
     private final Metadata metadata;
@@ -187,7 +188,7 @@ public ClusterState(long version, String stateUUID, ClusterState state) {
             state.metadata(),
             state.routingTable(),
             state.nodes(),
-            state.transportVersions,
+            state.compatibilityVersions,
             state.blocks(),
             state.customs(),
             false,
@@ -202,7 +203,7 @@ public ClusterState(
         Metadata metadata,
         RoutingTable routingTable,
         DiscoveryNodes nodes,
-        Map transportVersions,
+        Map compatibilityVersions,
         ClusterBlocks blocks,
         Map customs,
         boolean wasReadFromDiff,
@@ -214,20 +215,15 @@ public ClusterState(
         this.metadata = metadata;
         this.routingTable = routingTable;
         this.nodes = nodes;
-        this.transportVersions = Map.copyOf(transportVersions);
+        this.compatibilityVersions = Map.copyOf(compatibilityVersions);
         this.blocks = blocks;
         this.customs = customs;
         this.wasReadFromDiff = wasReadFromDiff;
         this.routingNodes = routingNodes;
         assert assertConsistentRoutingNodes(routingTable, nodes, routingNodes);
-
-        this.minTransportVersion = blocks.hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK)
-            ? TransportVersion.MINIMUM_COMPATIBLE
-            : transportVersions.values()
-                .stream()
-                .min(Comparator.naturalOrder())
-                // In practice transportVersions is always nonempty (except in tests) but use a conservative default anyway:
-                .orElse(TransportVersion.MINIMUM_COMPATIBLE);
+        this.minVersions = blocks.hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK)
+            ? new CompatibilityVersions(TransportVersions.MINIMUM_COMPATIBLE)
+            : CompatibilityVersions.minimumVersions(compatibilityVersions);
     }
 
     private static boolean assertConsistentRoutingNodes(
@@ -283,12 +279,12 @@ public DiscoveryNodes nodesIfRecovered() {
         return blocks.hasGlobalBlock(STATE_NOT_RECOVERED_BLOCK) ? DiscoveryNodes.EMPTY_NODES : nodes;
     }
 
-    public Map transportVersions() {
-        return this.transportVersions;
+    public Map compatibilityVersions() {
+        return this.compatibilityVersions;
     }
 
     public TransportVersion getMinTransportVersion() {
-        return this.minTransportVersion;
+        return this.minVersions.transportVersion();
     }
 
     public Metadata metadata() {
@@ -477,9 +473,9 @@ public String toString() {
         }
         sb.append(blocks());
         sb.append(nodes());
-        if (transportVersions.isEmpty() == false) {
-            sb.append("transport versions:\n");
-            for (var tv : transportVersions.entrySet()) {
+        if (compatibilityVersions.isEmpty() == false) {
+            sb.append("node versions:\n");
+            for (var tv : compatibilityVersions.entrySet()) {
                 sb.append(TAB).append(tv.getKey()).append(": ").append(tv.getValue()).append("\n");
             }
         }
@@ -633,21 +629,34 @@ public Iterator toXContentChunked(ToXContent.Params outerParams) {
                 (builder, params) -> builder.endObject()
             ),
 
-            // transportVersions
+            // transportVersions - redundant with the nodes_versions section but has to stay for backwards compatibility
             // just use NODES again, its node-related information
             chunkedSection(
                 metrics.contains(Metric.NODES),
                 (builder, params) -> builder.startArray("transport_versions"),
-                transportVersions.entrySet().iterator(),
+                compatibilityVersions.entrySet().iterator(),
                 e -> Iterators.single(
                     (builder, params) -> builder.startObject()
                         .field("node_id", e.getKey())
-                        .field("transport_version", e.getValue().toString())
+                        .field("transport_version", e.getValue().transportVersion().toString())
                         .endObject()
                 ),
                 (builder, params) -> builder.endArray()
             ),
 
+            // per-node version information
+            chunkedSection(
+                metrics.contains(Metric.NODES),
+                (builder, params) -> builder.startArray("nodes_versions"),
+                compatibilityVersions.entrySet().iterator(),
+                e -> Iterators.single((builder, params) -> {
+                    builder.startObject().field("node_id", e.getKey());
+                    e.getValue().toXContent(builder, params);
+                    return builder.endObject();
+                }),
+                (builder, params) -> builder.endArray()
+            ),
+
             // metadata
             metrics.contains(Metric.METADATA) ? metadata.toXContentChunked(outerParams) : Collections.emptyIterator(),
@@ -740,7 +749,7 @@ public static class Builder {
         private Metadata metadata = Metadata.EMPTY_METADATA;
         private RoutingTable routingTable = RoutingTable.EMPTY_ROUTING_TABLE;
         private DiscoveryNodes nodes = DiscoveryNodes.EMPTY_NODES;
-        private final Map transportVersions;
+        private final Map compatibilityVersions;
         private ClusterBlocks blocks = ClusterBlocks.EMPTY_CLUSTER_BLOCK;
         private final ImmutableOpenMap.Builder customs;
         private boolean fromDiff;
@@ -751,7 +760,7 @@ public Builder(ClusterState state) {
             this.version = state.version();
             this.uuid = state.stateUUID();
             this.nodes = state.nodes();
-            this.transportVersions = new HashMap<>(state.transportVersions);
+            this.compatibilityVersions = new HashMap<>(state.compatibilityVersions);
             this.routingTable = state.routingTable();
             this.metadata = state.metadata();
             this.blocks = state.blocks();
@@ -760,7 +769,7 @@ public Builder(ClusterState state) {
 
         public Builder(ClusterName clusterName) {
-            this.transportVersions = new HashMap<>();
+            this.compatibilityVersions = new HashMap<>();
             customs = ImmutableOpenMap.builder();
             this.clusterName = clusterName;
         }
@@ -778,21 +787,21 @@ public DiscoveryNodes nodes() {
             return nodes;
         }
 
-        public Builder putTransportVersion(String nodeId, TransportVersion version) {
-            transportVersions.put(nodeId, Objects.requireNonNull(version, nodeId));
+        public Builder putTransportVersion(String nodeId, TransportVersion transportVersion) {
+            compatibilityVersions.put(nodeId, new CompatibilityVersions(Objects.requireNonNull(transportVersion, nodeId)));
             return this;
         }
 
-        public Builder transportVersions(Map versions) {
+        public Builder compatibilityVersions(Map versions) {
             versions.forEach((key, value) -> Objects.requireNonNull(value, key));
             // remove all versions not present in the new map
-            this.transportVersions.keySet().retainAll(versions.keySet());
-            this.transportVersions.putAll(versions);
+            this.compatibilityVersions.keySet().retainAll(versions.keySet());
+            this.compatibilityVersions.putAll(versions);
             return this;
         }
 
-        public Map transportVersions() {
-            return Collections.unmodifiableMap(this.transportVersions);
+        public Map compatibilityVersions() {
+            return Collections.unmodifiableMap(this.compatibilityVersions);
         }
 
         public Builder routingTable(RoutingTable.Builder routingTableBuilder) {
@@ -880,7 +889,7 @@ public ClusterState build() {
                 metadata,
                 routingTable,
                 nodes,
-                transportVersions,
+                compatibilityVersions,
                 blocks,
                 customs.build(),
                 fromDiff,
@@ -922,8 +931,8 @@ public static ClusterState readFrom(StreamInput in, DiscoveryNode localNode) throws IOException {
             builder.metadata = Metadata.readFrom(in);
             builder.routingTable = RoutingTable.readFrom(in);
             builder.nodes = DiscoveryNodes.readFrom(in, localNode);
-            if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) {
-                builder.transportVersions(in.readMap(TransportVersion::readVersion));
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
+                builder.compatibilityVersions(in.readMap(CompatibilityVersions::readVersion));
             } else {
                 // this clusterstate is from a pre-8.8.0 node
                 // infer the versions from discoverynodes for now
@@ -935,7 +944,7 @@ public static ClusterState readFrom(StreamInput in, DiscoveryNode localNode) throws IOException {
                 Custom customIndexMetadata = in.readNamedWriteable(Custom.class);
                 builder.putCustom(customIndexMetadata.getWriteableName(), customIndexMetadata);
             }
-            if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+            if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) {
                 in.readVInt(); // used to be minimumMasterNodesOnPublishingMaster, which was used in 7.x for BWC with 6.x
             }
             return builder.build();
@@ -945,7 +954,7 @@ public static ClusterState readFrom(StreamInput in, DiscoveryNode localNode) throws IOException {
      * If the cluster state does not contain transport version information, this is the version
      * that is inferred for all nodes on version 8.8.0 or above.
      */
-    public static final TransportVersion INFERRED_TRANSPORT_VERSION = TransportVersion.V_8_8_0;
+    public static final TransportVersion INFERRED_TRANSPORT_VERSION = TransportVersions.V_8_8_0;
 
     private static TransportVersion inferTransportVersion(DiscoveryNode node) {
         TransportVersion tv;
@@ -967,12 +976,12 @@ public void writeTo(StreamOutput out) throws IOException {
         metadata.writeTo(out);
         routingTable.writeTo(out);
         nodes.writeTo(out);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) {
-            out.writeMap(transportVersions, StreamOutput::writeString, (o, v) -> TransportVersion.writeVersion(v, o));
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
+            out.writeMap(compatibilityVersions, (streamOutput, versions) -> versions.writeTo(streamOutput));
         }
         blocks.writeTo(out);
         VersionedNamedWriteable.writeVersionedWritables(out, customs);
-        if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+        if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) {
             out.writeVInt(-1); // used to be minimumMasterNodesOnPublishingMaster, which was used in 7.x for BWC with 6.x
         }
     }
@@ -992,7 +1001,7 @@ private static class ClusterStateDiff implements Diff {
         private final Diff nodes;
 
         @Nullable
-        private final Diff> transportVersions;
+        private final Diff> versions;
 
         private final Diff metadata;
@@ -1007,11 +1016,11 @@ private static class ClusterStateDiff implements Diff {
             clusterName = after.clusterName;
             routingTable = after.routingTable.diff(before.routingTable);
             nodes = after.nodes.diff(before.nodes);
-            transportVersions = DiffableUtils.diff(
-                before.transportVersions,
-                after.transportVersions,
+            versions = DiffableUtils.diff(
+                before.compatibilityVersions,
+                after.compatibilityVersions,
                 DiffableUtils.getStringKeySerializer(),
-                TRANSPORT_VERSION_VALUE_SERIALIZER
+                COMPATIBILITY_VERSIONS_VALUE_SERIALIZER
             );
             metadata = after.metadata.diff(before.metadata);
             blocks = after.blocks.diff(before.blocks);
@@ -1025,19 +1034,19 @@ private static class ClusterStateDiff implements Diff {
             toVersion = in.readLong();
             routingTable = RoutingTable.readDiffFrom(in);
             nodes = DiscoveryNodes.readDiffFrom(in, localNode);
-            if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0) && in.readBoolean()) {
-                transportVersions = DiffableUtils.readJdkMapDiff(
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0) && in.readBoolean()) {
+                versions = DiffableUtils.readJdkMapDiff(
                     in,
                     DiffableUtils.getStringKeySerializer(),
-                    TRANSPORT_VERSION_VALUE_SERIALIZER
+                    COMPATIBILITY_VERSIONS_VALUE_SERIALIZER
                 );
             } else {
-                transportVersions = null; // infer at application time
+                versions = null; // infer at application time
             }
             metadata = Metadata.readDiffFrom(in);
             blocks = ClusterBlocks.readDiffFrom(in);
             customs = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER);
-            if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+            if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) {
                 in.readVInt(); // used to be minimumMasterNodesOnPublishingMaster, which was used in 7.x for BWC with 6.x
             }
         }
@@ -1050,13 +1059,13 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeLong(toVersion);
             routingTable.writeTo(out);
             nodes.writeTo(out);
-            if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) {
-                out.writeOptionalWriteable(transportVersions);
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
+                out.writeOptionalWriteable(versions);
             }
             metadata.writeTo(out);
             blocks.writeTo(out);
             customs.writeTo(out);
-            if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+            if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) {
                 out.writeVInt(-1); // used to be minimumMasterNodesOnPublishingMaster, which was used in 7.x for BWC with 6.x
             }
         }
@@ -1075,8 +1084,8 @@ public ClusterState apply(ClusterState state) {
             builder.version(toVersion);
             builder.routingTable(routingTable.apply(state.routingTable));
             builder.nodes(nodes.apply(state.nodes));
-            if (transportVersions != null) {
-                builder.transportVersions(transportVersions.apply(state.transportVersions));
+            if (versions != null) {
+                builder.compatibilityVersions(this.versions.apply(state.compatibilityVersions));
             } else {
                 // infer the versions from discoverynodes for now
                 builder.nodes().getNodes().values().forEach(n -> builder.putTransportVersion(n.getId(), inferTransportVersion(n)));
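This file is the heart of the section: ClusterState stops tracking a bare TransportVersion per node and instead tracks a CompatibilityVersions wrapper, so further per-node version dimensions can ride along in the same map without another wire change. The minimum computation that replaced the removed stream-min over transport versions can be sketched like this (the real class lives in org.elasticsearch.cluster.version.CompatibilityVersions and also carries serialization and toXContent logic not shown here):

import java.util.Comparator;
import java.util.Map;

import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions;

public record CompatibilityVersions(TransportVersion transportVersion) {
    public static CompatibilityVersions minimumVersions(Map<String, CompatibilityVersions> byNode) {
        return new CompatibilityVersions(
            byNode.values()
                .stream()
                .map(CompatibilityVersions::transportVersion)
                .min(Comparator.naturalOrder())
                // the map is only empty in tests, so fall back to a conservative default:
                .orElse(TransportVersions.MINIMUM_COMPATIBLE)
        );
    }
}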
diff --git a/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java b/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java
index 2bd7f971c9a55..d2ebab48142d6 100644
--- a/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java
+++ b/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java
@@ -302,7 +302,7 @@ private MapDiff(
     ) throws IOException {
         this.keySerializer = keySerializer;
         this.valueSerializer = valueSerializer;
-        deletes = in.readList(keySerializer::readKey);
+        deletes = in.readCollectionAsList(keySerializer::readKey);
         int diffsCount = in.readVInt();
         diffs = diffsCount == 0 ? List.of() : new ArrayList<>(diffsCount);
         for (int i = 0; i < diffsCount; i++) {
diff --git a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
index 52a611fce5227..dc9ab61c8c5bc 100644
--- a/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/InternalClusterInfoService.java
@@ -33,6 +33,7 @@
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.store.StoreStats;
@@ -387,7 +388,7 @@ private class RefreshScheduler {
 
         ActionListener getListener() {
             return ActionListener.running(() -> {
                 if (shouldRefresh()) {
-                    threadPool.scheduleUnlessShuttingDown(updateFrequency, ThreadPool.Names.SAME, () -> {
+                    threadPool.scheduleUnlessShuttingDown(updateFrequency, EsExecutors.DIRECT_EXECUTOR_SERVICE, () -> {
                         if (shouldRefresh()) {
                             refreshAsync(getListener());
                         }
diff --git a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java
index bb11f4087538d..cc94137afa322 100644
--- a/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/NodeConnectionsService.java
@@ -169,7 +169,7 @@ protected void doRun() {
 
         void scheduleNextCheck() {
             if (connectionChecker == this) {
-                threadPool.scheduleUnlessShuttingDown(reconnectInterval, ThreadPool.Names.GENERIC, this);
+                threadPool.scheduleUnlessShuttingDown(reconnectInterval, threadPool.generic(), this);
             }
         }
diff --git a/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java b/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java
index 7df9bb605bc8f..d3a9397d148cf 100644
--- a/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java
+++ b/server/src/main/java/org/elasticsearch/cluster/RepositoryCleanupInProgress.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.cluster;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -37,7 +38,7 @@ public RepositoryCleanupInProgress(List entries) {
     }
 
     RepositoryCleanupInProgress(StreamInput in) throws IOException {
-        this.entries = in.readList(Entry::new);
+        this.entries = in.readCollectionAsList(Entry::new);
     }
 
     public static NamedDiff readDiffFrom(StreamInput in) throws IOException {
@@ -64,7 +65,7 @@ public String getWriteableName() {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeList(entries);
+        out.writeCollection(entries);
     }
 
     @Override
@@ -88,7 +89,7 @@ public String toString() {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_7_4_0;
+        return TransportVersions.V_7_4_0;
     }
 
     public static final class Entry implements Writeable, RepositoryOperation {
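These files show the other recurring theme: the scheduling APIs now take a java.util.concurrent.Executor instead of a ThreadPool.Names string. EsExecutors.DIRECT_EXECUTOR_SERVICE replaces Names.SAME (the task runs inline on the scheduler thread, so it must be cheap and non-blocking) and threadPool.generic() replaces Names.GENERIC. A sketch with a hypothetical delay and task bodies:

threadPool.scheduleUnlessShuttingDown(
    TimeValue.timeValueSeconds(30),
    EsExecutors.DIRECT_EXECUTOR_SERVICE, // was ThreadPool.Names.SAME: runs inline on the scheduler thread
    () -> logger.trace("cheap bookkeeping only")
);
threadPool.scheduleUnlessShuttingDown(
    TimeValue.timeValueSeconds(30),
    threadPool.generic(), // was ThreadPool.Names.GENERIC: forked to the generic pool
    () -> runExpensiveCheck() // hypothetical task that may block
);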
diff --git a/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java b/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java
index 941623b95d8da..bd7a2ed1cffc0 100644
--- a/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java
+++ b/server/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.cluster;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest;
 import org.elasticsearch.cluster.ClusterState.Custom;
 import org.elasticsearch.common.collect.Iterators;
@@ -349,7 +350,7 @@ public String getWriteableName() {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.MINIMUM_COMPATIBLE;
+        return TransportVersions.MINIMUM_COMPATIBLE;
     }
 
     public static NamedDiff readDiffFrom(StreamInput in) throws IOException {
@@ -371,7 +372,7 @@ public RestoreInProgress(StreamInput in) throws IOException {
             // Backwards compatibility: previously there was no logging of the start or completion of a snapshot restore
             quiet = true;
         }
-        List indices = in.readImmutableList(StreamInput::readString);
+        List indices = in.readCollectionAsImmutableList(StreamInput::readString);
         entriesBuilder.put(
             uuid,
             new Entry(
diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java
index 85e28f1a1ddfa..a4fde4993a47e 100644
--- a/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java
+++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotDeletionsInProgress.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.cluster;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.cluster.ClusterState.Custom;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.collect.Iterators;
@@ -56,7 +57,7 @@ public static SnapshotDeletionsInProgress of(List entries) {
@@ -153,7 +154,7 @@ public int hashCode() {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeList(entries);
+        out.writeCollection(entries);
     }
 
     public static NamedDiff readDiffFrom(StreamInput in) throws IOException {
@@ -162,7 +163,7 @@ public static NamedDiff readDiffFrom(StreamInput in) throws IOException
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.MINIMUM_COMPATIBLE;
+        return TransportVersions.MINIMUM_COMPATIBLE;
     }
 
     @Override
@@ -228,7 +229,7 @@ private Entry(List snapshots, String repoName, long startTime, long
 
         public Entry(StreamInput in) throws IOException {
             this.repoName = in.readString();
-            this.snapshots = in.readImmutableList(SnapshotId::new);
+            this.snapshots = in.readCollectionAsImmutableList(SnapshotId::new);
             this.startTime = in.readVLong();
             this.repositoryStateId = in.readLong();
             this.state = State.readFrom(in);
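Alongside readCollectionAsList, readImmutableList becomes readCollectionAsImmutableList: the same read, but the returned list is unmodifiable. A two-line sketch of the distinction:

// was: this.snapshots = in.readImmutableList(SnapshotId::new)
List<SnapshotId> snapshots = in.readCollectionAsImmutableList(SnapshotId::new);
// mutating the returned list throws UnsupportedOperationException; use readCollectionAsList for a mutable copy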
diff --git a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java
index e15f3578bc140..0f046d4ab94f1 100644
--- a/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java
+++ b/server/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.cluster;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.cluster.ClusterState.Custom;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.Strings;
@@ -192,10 +193,10 @@ public String getWriteableName() {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.MINIMUM_COMPATIBLE;
+        return TransportVersions.MINIMUM_COMPATIBLE;
     }
 
-    private static final TransportVersion DIFFABLE_VERSION = TransportVersion.V_8_5_0;
+    private static final TransportVersion DIFFABLE_VERSION = TransportVersions.V_8_5_0;
 
     public static NamedDiff readDiffFrom(StreamInput in) throws IOException {
         if (in.getTransportVersion().onOrAfter(DIFFABLE_VERSION)) {
@@ -838,13 +839,13 @@ private static Entry readFrom(StreamInput in) throws IOException {
         final String failure = in.readOptionalString();
         final Map userMetadata = in.readMap();
         final IndexVersion version = IndexVersion.readVersion(in);
-        final List dataStreams = in.readImmutableStringList();
+        final List dataStreams = in.readStringCollectionAsImmutableList();
         final SnapshotId source = in.readOptionalWriteable(SnapshotId::new);
         final Map clones = in.readImmutableMap(
             RepositoryShardId::readFrom,
             ShardSnapshotStatus::readFrom
         );
-        final List featureStates = in.readImmutableList(SnapshotFeatureInfo::new);
+        final List featureStates = in.readCollectionAsImmutableList(SnapshotFeatureInfo::new);
         if (source == null) {
             return snapshot(
                 snapshot,
@@ -1344,7 +1345,7 @@ public void writeTo(StreamOutput out) throws IOException {
         } else {
             out.writeMap(shardStatusByRepoShardId);
         }
-        out.writeList(featureStates);
+        out.writeCollection(featureStates);
     }
 
     @Override
@@ -1431,7 +1432,7 @@ public RepositoryShardId readKey(StreamInput in) throws IOException {
         this.indexByIndexNameDiff = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), INDEX_ID_VALUE_SERIALIZER);
         this.updatedState = State.fromValue(in.readByte());
         this.updatedRepositoryStateId = in.readLong();
-        this.updatedDataStreams = in.readOptionalStringList();
+        this.updatedDataStreams = in.readOptionalStringCollectionAsList();
         this.updatedFailure = in.readOptionalString();
         this.shardsByShardIdDiff = DiffableUtils.readJdkMapDiff(
             in,
@@ -1598,7 +1599,7 @@ private static final class SnapshotInProgressDiff implements NamedDiff {
             this.mapDiff = DiffableUtils.readJdkMapDiff(
                 in,
                 DiffableUtils.getStringKeySerializer(),
-                i -> new ByRepo(i.readImmutableList(Entry::readFrom)),
+                i -> new ByRepo(i.readCollectionAsImmutableList(Entry::readFrom)),
                 i -> new ByRepo.ByRepoDiff(
                     DiffableUtils.readJdkMapDiff(i, DiffableUtils.getStringKeySerializer(), Entry::readFrom, EntryDiff::new),
                     DiffableUtils.readJdkMapDiff(i, DiffableUtils.getStringKeySerializer(), ByRepo.INT_DIFF_VALUE_SERIALIZER)
@@ -1614,7 +1615,7 @@ public SnapshotsInProgress apply(Custom part) {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.MINIMUM_COMPATIBLE;
+        return TransportVersions.MINIMUM_COMPATIBLE;
     }
 
     @Override
@@ -1661,7 +1662,7 @@ private ByRepo(List entries) {
 
         @Override
         public void writeTo(StreamOutput out) throws IOException {
-            out.writeList(entries);
+            out.writeCollection(entries);
         }
 
         @Override
diff --git a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java
index afb997eeb20fa..452aae4a1c467 100644
--- a/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java
+++ b/server/src/main/java/org/elasticsearch/cluster/action/index/MappingUpdatedAction.java
@@ -79,7 +79,7 @@ public void setClient(Client client) {
      * potentially waiting for a master node to be available.
      */
     public void updateMappingOnMaster(Index index, Mapping mappingUpdate, ActionListener listener) {
-        final RunOnce release = new RunOnce(() -> semaphore.release());
+        final RunOnce release = new RunOnce(semaphore::release);
         try {
             semaphore.acquire();
         } catch (InterruptedException e) {
diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java
index 2aefe30b9cb3b..6e4968c8f359a 100644
--- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java
+++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlockException.java
@@ -35,7 +35,7 @@ public ClusterBlockException(Map> indexLevelBlocks) {
 
     public ClusterBlockException(StreamInput in) throws IOException {
         super(in);
-        this.blocks = in.readImmutableSet(ClusterBlock::new);
+        this.blocks = in.readCollectionAsImmutableSet(ClusterBlock::new);
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java
index 1f2bcb827d8e0..079fac7faaaa0 100644
--- a/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java
+++ b/server/src/main/java/org/elasticsearch/cluster/block/ClusterBlocks.java
@@ -277,7 +277,7 @@ public String toString() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         writeBlockSet(global, out);
-        out.writeMap(indicesBlocks, StreamOutput::writeString, (o, s) -> writeBlockSet(s, o));
+        out.writeMap(indicesBlocks, (o, s) -> writeBlockSet(s, o));
     }
 
     private static void writeBlockSet(Set blocks, StreamOutput out) throws IOException {
@@ -294,7 +294,7 @@ public static ClusterBlocks readFrom(StreamInput in) throws IOException {
     }
 
     private static Set readBlockSet(StreamInput in) throws IOException {
-        return in.readImmutableSet(ClusterBlock::new);
+        return in.readCollectionAsImmutableSet(ClusterBlock::new);
     }
 
     public static Diff readDiffFrom(StreamInput in) throws IOException {
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java
index 45fd9244e290a..ac0e079ef4c5e 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterBootstrapService.java
@@ -16,12 +16,12 @@
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.core.Nullable;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.discovery.DiscoveryModule;
 import org.elasticsearch.node.Node;
-import org.elasticsearch.threadpool.ThreadPool.Names;
 import org.elasticsearch.transport.TransportService;
 
 import java.util.ArrayList;
@@ -130,7 +130,11 @@ void logBootstrapState(Metadata metadata) {
                 logger.info("this node is locked into cluster UUID [{}] and will not attempt further cluster bootstrapping", clusterUUID);
             } else {
                 transportService.getThreadPool()
-                    .scheduleWithFixedDelay(() -> logRemovalWarning(clusterUUID), TimeValue.timeValueHours(12), Names.SAME);
+                    .scheduleWithFixedDelay(
+                        () -> logRemovalWarning(clusterUUID),
+                        TimeValue.timeValueHours(12),
+                        EsExecutors.DIRECT_EXECUTOR_SERVICE
+                    );
                 logRemovalWarning(clusterUUID);
             }
         } else {
@@ -211,19 +215,20 @@ void scheduleUnconfiguredBootstrap() {
             unconfiguredBootstrapTimeout
         );
 
-        transportService.getThreadPool().scheduleUnlessShuttingDown(unconfiguredBootstrapTimeout, Names.GENERIC, new Runnable() {
-            @Override
-            public void run() {
-                final Set discoveredNodes = getDiscoveredNodes();
-                logger.debug("performing best-effort cluster bootstrapping with {}", discoveredNodes);
-                startBootstrap(discoveredNodes, emptyList());
-            }
+        transportService.getThreadPool()
+            .scheduleUnlessShuttingDown(unconfiguredBootstrapTimeout, transportService.getThreadPool().generic(), new Runnable() {
+                @Override
+                public void run() {
+                    final Set discoveredNodes = getDiscoveredNodes();
+                    logger.debug("performing best-effort cluster bootstrapping with {}", discoveredNodes);
+                    startBootstrap(discoveredNodes, emptyList());
+                }
 
-            @Override
-            public String toString() {
-                return "unconfigured-discovery delayed bootstrap";
-            }
-        });
+                @Override
+                public String toString() {
+                    return "unconfigured-discovery delayed bootstrap";
+                }
+            });
     }
 
     private Set getDiscoveredNodes() {
@@ -259,17 +264,18 @@ private void doBootstrap(VotingConfiguration votingConfiguration) {
             votingConfigurationConsumer.accept(votingConfiguration);
         } catch (Exception e) {
             logger.warn(() -> "exception when bootstrapping with " + votingConfiguration + ", rescheduling", e);
-            transportService.getThreadPool().scheduleUnlessShuttingDown(TimeValue.timeValueSeconds(10), Names.GENERIC, new Runnable() {
-                @Override
-                public void run() {
-                    doBootstrap(votingConfiguration);
-                }
-
-                @Override
-                public String toString() {
-                    return "retry of failed bootstrapping with " + votingConfiguration;
-                }
-            });
+            transportService.getThreadPool()
+                .scheduleUnlessShuttingDown(TimeValue.timeValueSeconds(10), transportService.getThreadPool().generic(), new Runnable() {
+                    @Override
+                    public void run() {
+                        doBootstrap(votingConfiguration);
+                    }
+
+                    @Override
+                    public String toString() {
+                        return "retry of failed bootstrapping with " + votingConfiguration;
+                    }
+                });
         }
     }
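Both reschedule sites above keep an anonymous Runnable rather than a lambda purely so they can override toString(); the scheduler, logs, and pending-task listings then show a meaningful task name. The shape of the pattern, with a hypothetical retry body:

threadPool.scheduleUnlessShuttingDown(TimeValue.timeValueSeconds(10), threadPool.generic(), new Runnable() {
    @Override
    public void run() {
        retryBootstrap(); // hypothetical retry body
    }

    @Override
    public String toString() {
        return "retry of failed bootstrapping"; // surfaces in logs instead of an anonymous lambda name
    }
});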
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java
index 50bc9479ee540..5134f153a7fbb 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ClusterFormationFailureHelper.java
@@ -36,6 +36,7 @@
 import java.util.Locale;
 import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.Executor;
 import java.util.function.Supplier;
 
 import static org.elasticsearch.cluster.coordination.ClusterBootstrapService.INITIAL_MASTER_NODES_SETTING;
@@ -53,6 +54,7 @@ public class ClusterFormationFailureHelper {
 
     private final Supplier clusterFormationStateSupplier;
     private final ThreadPool threadPool;
+    private final Executor clusterCoordinationExecutor;
     private final TimeValue clusterFormationWarningTimeout;
     private final Runnable logLastFailedJoinAttempt;
     @Nullable // if no warning is scheduled
@@ -66,6 +68,7 @@ public ClusterFormationFailureHelper(
     ) {
         this.clusterFormationStateSupplier = clusterFormationStateSupplier;
         this.threadPool = threadPool;
+        this.clusterCoordinationExecutor = threadPool.executor(Names.CLUSTER_COORDINATION);
         this.clusterFormationWarningTimeout = DISCOVERY_CLUSTER_FORMATION_WARNING_TIMEOUT_SETTING.get(settings);
         this.logLastFailedJoinAttempt = logLastFailedJoinAttempt;
     }
@@ -91,7 +94,7 @@ private boolean isActive() {
     }
 
     void scheduleNextWarning() {
-        threadPool.scheduleUnlessShuttingDown(clusterFormationWarningTimeout, Names.CLUSTER_COORDINATION, new AbstractRunnable() {
+        threadPool.scheduleUnlessShuttingDown(clusterFormationWarningTimeout, clusterCoordinationExecutor, new AbstractRunnable() {
             @Override
             public void onFailure(Exception e) {
                 logger.debug("unexpected exception scheduling cluster formation warning", e);
@@ -204,19 +207,19 @@ private static boolean calculateHasDiscoveredQuorum(
 
         public ClusterFormationState(StreamInput in) throws IOException {
             this(
-                in.readStringList(),
+                in.readStringCollectionAsList(),
                 new DiscoveryNode(in),
                 in.readMap(DiscoveryNode::new),
                 in.readLong(),
                 in.readLong(),
                 new VotingConfiguration(in),
                 new VotingConfiguration(in),
-                in.readImmutableList(TransportAddress::new),
-                in.readImmutableList(DiscoveryNode::new),
+                in.readCollectionAsImmutableList(TransportAddress::new),
+                in.readCollectionAsImmutableList(DiscoveryNode::new),
                 in.readLong(),
                 in.readBoolean(),
                 new StatusInfo(in),
-                in.readList(JoinStatus::new)
+                in.readCollectionAsList(JoinStatus::new)
             );
         }
@@ -378,13 +381,13 @@ private static String timeValueWithMillis(TimeValue timeValue) {
         public void writeTo(StreamOutput out) throws IOException {
             out.writeStringCollection(initialMasterNodesSetting);
             localNode.writeTo(out);
-            out.writeMap(masterEligibleNodes, StreamOutput::writeString, (streamOutput, node) -> node.writeTo(streamOutput));
+            out.writeMap(masterEligibleNodes, StreamOutput::writeWriteable);
             out.writeLong(clusterStateVersion);
             out.writeLong(acceptedTerm);
             lastAcceptedConfiguration.writeTo(out);
             lastCommittedConfiguration.writeTo(out);
-            out.writeList(resolvedAddresses);
-            out.writeList(foundPeers);
+            out.writeCollection(resolvedAddresses);
+            out.writeCollection(foundPeers);
             out.writeLong(currentTerm);
             out.writeBoolean(hasDiscoveredQuorum);
             statusInfo.writeTo(out);
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java
index bd2fccd70f770..1c2f120c6c925 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationDiagnosticsService.java
@@ -1169,7 +1169,7 @@ public void run() {
             public String toString() {
                 return "delayed retrieval of coordination diagnostics info from " + masterEligibleNode;
             }
-        }, remoteRequestInitialDelay, ThreadPool.Names.CLUSTER_COORDINATION);
+        }, remoteRequestInitialDelay, clusterCoordinationExecutor);
     }
 
     void cancelPollingRemoteMasterStabilityDiagnostic() {
@@ -1285,7 +1285,7 @@ private static List readRecentMasters(StreamInput in) throws IOException {
         boolean hasRecentMasters = in.readBoolean();
         List recentMasters;
         if (hasRecentMasters) {
-            recentMasters = in.readImmutableList(DiscoveryNode::new);
+            recentMasters = in.readCollectionAsImmutableList(DiscoveryNode::new);
         } else {
             recentMasters = null;
         }
@@ -1323,7 +1323,7 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeBoolean(false);
         } else {
             out.writeBoolean(true);
-            out.writeList(recentMasters);
+            out.writeCollection(recentMasters);
         }
         out.writeOptionalString(remoteExceptionMessage);
         out.writeOptionalString(remoteExceptionStackTrace);
@@ -1331,7 +1331,7 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeBoolean(false);
         } else {
             out.writeBoolean(true);
-            out.writeMap(nodeToClusterFormationDescriptionMap, StreamOutput::writeString, StreamOutput::writeString);
+            out.writeMap(nodeToClusterFormationDescriptionMap, StreamOutput::writeString);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationMetadata.java b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationMetadata.java
index bae6c001f7df7..0a859159c7843 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationMetadata.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/CoordinationMetadata.java
@@ -98,7 +98,7 @@ public CoordinationMetadata(StreamInput in) throws IOException {
         term = in.readLong();
         lastCommittedConfiguration = new VotingConfiguration(in);
         lastAcceptedConfiguration = new VotingConfiguration(in);
-        votingConfigExclusions = in.readImmutableSet(VotingConfigExclusion::new);
+        votingConfigExclusions = in.readCollectionAsImmutableSet(VotingConfigExclusion::new);
     }
 
     public static Builder builder() {
diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java
index 5d67a4f03fed8..dc52791a5d5e2 100644
--- a/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java
+++ b/server/src/main/java/org/elasticsearch/cluster/coordination/Coordinator.java
@@ -39,6 +39,7 @@
 import org.elasticsearch.cluster.service.ClusterApplierService;
 import org.elasticsearch.cluster.service.MasterService;
 import org.elasticsearch.cluster.service.MasterServiceTaskQueue;
+import org.elasticsearch.cluster.version.CompatibilityVersions;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
@@ -208,7 +209,8 @@ public Coordinator(
         CircuitBreakerService circuitBreakerService,
         Reconfigurator reconfigurator,
         LeaderHeartbeatService leaderHeartbeatService,
-        PreVoteCollector.Factory preVoteCollectorFactory
+        PreVoteCollector.Factory preVoteCollectorFactory,
+        CompatibilityVersions compatibilityVersions
     ) {
         this.settings = settings;
         this.transportService = transportService;
@@ -232,7 +234,8 @@ public Coordinator(
             joinReasonService,
             circuitBreakerService,
             reconfigurator::maybeReconfigureAfterNewMasterIsElected,
-            this::getLatestStoredStateAfterWinningAnElection
+            this::getLatestStoredStateAfterWinningAnElection,
+            compatibilityVersions
         );
         this.joinValidationService = new JoinValidationService(
             settings,
@@ -769,7 +772,7 @@ private void processJoinRequest(JoinRequest joinRequest, ActionListener joinListener) {
             && optionalJoin.stream().allMatch(j -> j.getTerm() <= getCurrentTerm());
 
         optionalJoin.ifPresent(this::handleJoin);
-        joinAccumulator.handleJoinRequest(joinRequest.getSourceNode(), joinRequest.getTransportVersion(), joinListener);
+        joinAccumulator.handleJoinRequest(joinRequest.getSourceNode(), joinRequest.getCompatibilityVersions(), joinListener);
 
         if (prevElectionWon == false && coordState.electionWon()) {
             becomeLeader();
@@ -789,14 +792,33 @@ private void updateSingleNodeClusterChecker() {
             singleNodeClusterChecker = transportService.getThreadPool().scheduleWithFixedDelay(new Runnable() {
                 @Override
                 public void run() {
-                    Coordinator.this.checkSingleNodeCluster();
+                    synchronized (mutex) {
+                        if (mode != Mode.LEADER || applierState.nodes().size() > 1) {
+                            return;
+                        }
+                    }
+
+                    if (DISCOVERY_SEED_HOSTS_SETTING.exists(settings)
+                        && DISCOVERY_SEED_HOSTS_SETTING.get(settings).isEmpty() == false) {
+                        logger.warn(
+                            """
+                                This node is a fully-formed single-node cluster with cluster UUID [{}], but it is configured as if to \
+                                discover other nodes and form a multi-node cluster via the [{}={}] setting. Fully-formed clusters do \
+                                not attempt to discover other nodes, and nodes with different cluster UUIDs cannot belong to the same \
+                                cluster. The cluster UUID persists across restarts and can only be changed by deleting the contents of \
+                                the node's data path(s). Remove the discovery configuration to suppress this message.""",
+                            applierState.metadata().clusterUUID(),
+                            DISCOVERY_SEED_HOSTS_SETTING.getKey(),
+                            DISCOVERY_SEED_HOSTS_SETTING.get(settings)
+                        );
+                    }
                 }
 
                 @Override
                 public String toString() {
                     return "single-node cluster checker";
                 }
-            }, this.singleNodeClusterSeedHostsCheckInterval, Names.SAME);
+            }, singleNodeClusterSeedHostsCheckInterval, clusterCoordinationExecutor);
         }
         return;
     }
@@ -808,30 +830,6 @@ public String toString() {
         }
     }
 
-    private void checkSingleNodeCluster() {
-        if (mode != Mode.LEADER || applierState.nodes().size() > 1) {
-            return;
-        }
-
-        if (DISCOVERY_SEED_HOSTS_SETTING.exists(settings)) {
-            if (DISCOVERY_SEED_HOSTS_SETTING.get(settings).isEmpty()) {
-                // For a single-node cluster, the only acceptable setting is an empty list.
-                return;
-            } else {
-                logger.warn(
-                    """
-                        This node is a fully-formed single-node cluster with cluster UUID [{}], but it is configured as if to \
-                        discover other nodes and form a multi-node cluster via the [{}] setting. Fully-formed clusters do not \
-                        attempt to discover other nodes, and nodes with different cluster UUIDs cannot belong to the same cluster. \
-                        The cluster UUID persists across restarts and can only be changed by deleting the contents of the node's \
-                        data path(s). Remove the discovery configuration to suppress this message.""",
-                    applierState.metadata().clusterUUID(),
-                    DISCOVERY_SEED_HOSTS_SETTING.getKey() + "=" + DISCOVERY_SEED_HOSTS_SETTING.get(settings)
-                );
-            }
-        }
-    }
-
     void becomeCandidate(String method) {
         assert Thread.holdsLock(mutex) : "Coordinator mutex not held";
         logger.debug(
@@ -1904,7 +1902,7 @@ public void run() {
                 public String toString() {
                     return "scheduled timeout for " + CoordinatorPublication.this;
                 }
-            }, publishTimeout, Names.CLUSTER_COORDINATION);
+            }, publishTimeout, clusterCoordinationExecutor);
 
             this.infoTimeoutHandler = transportService.getThreadPool().schedule(new Runnable() {
                 @Override
@@ -1918,7 +1916,7 @@ public void run() {
                 public String toString() {
                     return "scheduled timeout for reporting on " + CoordinatorPublication.this;
                 }
-            }, publishInfoTimeout, Names.CLUSTER_COORDINATION);
+            }, publishInfoTimeout, clusterCoordinationExecutor);
         }
 
         private void removePublicationAndPossiblyBecomeCandidate(String reason) {
ThreadPool threadPool; + private final Executor clusterCoordinationExecutor; private final Random random; public ElectionSchedulerFactory(Settings settings, Random random, ThreadPool threadPool) { this.random = random; this.threadPool = threadPool; + this.clusterCoordinationExecutor = threadPool.executor(Names.CLUSTER_COORDINATION); initialTimeout = ELECTION_INITIAL_TIMEOUT_SETTING.get(settings); backoffTime = ELECTION_BACK_OFF_TIME_SETTING.get(settings); @@ -218,7 +221,7 @@ public String toString() { }; logger.debug("scheduling {}", runnable); - threadPool.scheduleUnlessShuttingDown(TimeValue.timeValueMillis(delayMillis), Names.CLUSTER_COORDINATION, runnable); + threadPool.scheduleUnlessShuttingDown(TimeValue.timeValueMillis(delayMillis), clusterCoordinationExecutor, runnable); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java b/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java index 8b76b599b97b7..ad2faaccf0e96 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/FollowersChecker.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.TimeValue; import org.elasticsearch.monitor.NodeHealthService; @@ -413,7 +414,7 @@ public void run() { public String toString() { return FollowerChecker.this + "::handleWakeUp"; } - }, followerCheckInterval, Names.SAME); + }, followerCheckInterval, EsExecutors.DIRECT_EXECUTOR_SERVICE); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java index a147fbf9e9ccb..ce2754fa3854c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinHelper.java @@ -11,7 +11,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.ChannelActionListener; import org.elasticsearch.cluster.ClusterState; @@ -23,6 +22,7 @@ import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.cluster.service.MasterServiceTaskQueue; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Priority; import org.elasticsearch.common.Strings; import org.elasticsearch.common.breaker.CircuitBreaker; @@ -76,6 +76,7 @@ public class JoinHelper { private final JoinReasonService joinReasonService; private final CircuitBreakerService circuitBreakerService; private final ObjLongConsumer> latestStoredStateSupplier; + private final CompatibilityVersions compatibilityVersions; private final Map, PendingJoinInfo> pendingOutgoingJoins = ConcurrentCollections.newConcurrentMap(); private final AtomicReference lastFailedJoinAttempt = new AtomicReference<>(); @@ -94,7 +95,8 @@ public class JoinHelper { JoinReasonService joinReasonService, CircuitBreakerService 
circuitBreakerService, Function maybeReconfigureAfterMasterElection, - ObjLongConsumer> latestStoredStateSupplier + ObjLongConsumer> latestStoredStateSupplier, + CompatibilityVersions compatibilityVersions ) { this.joinTaskQueue = masterService.createTaskQueue( "node-join", @@ -108,6 +110,7 @@ public class JoinHelper { this.nodeHealthService = nodeHealthService; this.joinReasonService = joinReasonService; this.latestStoredStateSupplier = latestStoredStateSupplier; + this.compatibilityVersions = compatibilityVersions; transportService.registerRequestHandler( JOIN_ACTION_NAME, @@ -235,7 +238,7 @@ public void sendJoinRequest(DiscoveryNode destination, long term, Optional logger.debug("dropping join request to [{}]: [{}]", destination, statusInfo.getInfo()); return; } - final JoinRequest joinRequest = new JoinRequest(transportService.getLocalNode(), TransportVersion.current(), term, optionalJoin); + final JoinRequest joinRequest = new JoinRequest(transportService.getLocalNode(), compatibilityVersions, term, optionalJoin); final Tuple dedupKey = Tuple.tuple(destination, joinRequest); final var pendingJoinInfo = new PendingJoinInfo(transportService.getThreadPool().relativeTimeInMillis()); if (pendingOutgoingJoins.putIfAbsent(dedupKey, pendingJoinInfo) == null) { @@ -401,17 +404,21 @@ List getInFlightJoinStatuses() { } interface JoinAccumulator { - void handleJoinRequest(DiscoveryNode sender, TransportVersion transportVersion, ActionListener joinListener); + void handleJoinRequest(DiscoveryNode sender, CompatibilityVersions compatibilityVersions, ActionListener joinListener); default void close(Mode newMode) {} } class LeaderJoinAccumulator implements JoinAccumulator { @Override - public void handleJoinRequest(DiscoveryNode sender, TransportVersion transportVersion, ActionListener joinListener) { + public void handleJoinRequest( + DiscoveryNode sender, + CompatibilityVersions compatibilityVersions, + ActionListener joinListener + ) { final JoinTask task = JoinTask.singleNode( sender, - transportVersion, + compatibilityVersions, joinReasonService.getJoinReason(sender, Mode.LEADER), joinListener, currentTermSupplier.getAsLong() @@ -427,7 +434,11 @@ public String toString() { static class InitialJoinAccumulator implements JoinAccumulator { @Override - public void handleJoinRequest(DiscoveryNode sender, TransportVersion transportVersion, ActionListener joinListener) { + public void handleJoinRequest( + DiscoveryNode sender, + CompatibilityVersions compatibilityVersions, + ActionListener joinListener + ) { assert false : "unexpected join from " + sender + " during initialisation"; joinListener.onFailure(new CoordinationStateRejectedException("join target is not initialised yet")); } @@ -440,7 +451,11 @@ public String toString() { static class FollowerJoinAccumulator implements JoinAccumulator { @Override - public void handleJoinRequest(DiscoveryNode sender, TransportVersion transportVersion, ActionListener joinListener) { + public void handleJoinRequest( + DiscoveryNode sender, + CompatibilityVersions compatibilityVersions, + ActionListener joinListener + ) { joinListener.onFailure(new CoordinationStateRejectedException("join target is a follower")); } @@ -452,13 +467,17 @@ public String toString() { class CandidateJoinAccumulator implements JoinAccumulator { - private final Map>> joinRequestAccumulator = new HashMap<>(); + private final Map>> joinRequestAccumulator = new HashMap<>(); boolean closed; @Override - public void handleJoinRequest(DiscoveryNode sender, TransportVersion transportVersion, 
ActionListener joinListener) { + public void handleJoinRequest( + DiscoveryNode sender, + CompatibilityVersions compatibilityVersions, + ActionListener joinListener + ) { assert closed == false : "CandidateJoinAccumulator closed"; - var prev = joinRequestAccumulator.put(sender, Tuple.tuple(transportVersion, joinListener)); + var prev = joinRequestAccumulator.put(sender, Tuple.tuple(compatibilityVersions, joinListener)); if (prev != null) { prev.v2().onFailure(new CoordinationStateRejectedException("received a newer join from " + sender)); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinRequest.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinRequest.java index 45335689a6635..3d70fca6723af 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinRequest.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinRequest.java @@ -8,7 +8,9 @@ package org.elasticsearch.cluster.coordination; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.transport.TransportRequest; @@ -25,9 +27,9 @@ public class JoinRequest extends TransportRequest { private final DiscoveryNode sourceNode; /** - * The transport version used by the sending node. + * The compatibility versions used by the sending node. */ - private final TransportVersion transportVersion; + private final CompatibilityVersions compatibilityVersions; /** * The minimum term for which the joining node will accept any cluster state publications. 
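
The stream constructor below shows the wire-level half of this change. A read-side sketch using only the calls visible in the next hunk, pulled out here as a hypothetical helper for clarity:

    // Peers on 8.8.0+ send CompatibilityVersions explicitly; for older peers it is
    // reconstructed from the node version, which mapped 1-1 to TransportVersion
    // before 8.8.0.
    static CompatibilityVersions readCompatibilityVersions(StreamInput in, DiscoveryNode sourceNode) throws IOException {
        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
            return CompatibilityVersions.readVersion(in);
        }
        return new CompatibilityVersions(TransportVersion.fromId(sourceNode.getVersion().id));
    }
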
If the joining node is in a strictly greater @@ -44,10 +46,15 @@ public class JoinRequest extends TransportRequest { */ private final Optional optionalJoin; - public JoinRequest(DiscoveryNode sourceNode, TransportVersion transportVersion, long minimumTerm, Optional optionalJoin) { + public JoinRequest( + DiscoveryNode sourceNode, + CompatibilityVersions compatibilityVersions, + long minimumTerm, + Optional optionalJoin + ) { assert optionalJoin.isPresent() == false || optionalJoin.get().getSourceNode().equals(sourceNode); this.sourceNode = sourceNode; - this.transportVersion = transportVersion; + this.compatibilityVersions = compatibilityVersions; this.minimumTerm = minimumTerm; this.optionalJoin = optionalJoin; } @@ -55,11 +62,11 @@ public JoinRequest(DiscoveryNode sourceNode, TransportVersion transportVersion, public JoinRequest(StreamInput in) throws IOException { super(in); sourceNode = new DiscoveryNode(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { - transportVersion = TransportVersion.readVersion(in); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { + compatibilityVersions = CompatibilityVersions.readVersion(in); } else { // there's a 1-1 mapping from Version to TransportVersion before 8.8.0 - transportVersion = TransportVersion.fromId(sourceNode.getVersion().id); + compatibilityVersions = new CompatibilityVersions(TransportVersion.fromId(sourceNode.getVersion().id)); } minimumTerm = in.readLong(); optionalJoin = Optional.ofNullable(in.readOptionalWriteable(Join::new)); @@ -69,8 +76,8 @@ public JoinRequest(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); sourceNode.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { - TransportVersion.writeVersion(transportVersion, out); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { + compatibilityVersions.writeTo(out); } out.writeLong(minimumTerm); out.writeOptionalWriteable(optionalJoin.orElse(null)); @@ -80,8 +87,8 @@ public DiscoveryNode getSourceNode() { return sourceNode; } - public TransportVersion getTransportVersion() { - return transportVersion; + public CompatibilityVersions getCompatibilityVersions() { + return compatibilityVersions; } public long getMinimumTerm() { @@ -108,13 +115,13 @@ public boolean equals(Object o) { if (minimumTerm != that.minimumTerm) return false; if (sourceNode.equals(that.sourceNode) == false) return false; - if (transportVersion.equals(that.transportVersion) == false) return false; + if (compatibilityVersions.equals(that.compatibilityVersions) == false) return false; return optionalJoin.equals(that.optionalJoin); } @Override public int hashCode() { - return Objects.hash(sourceNode, transportVersion, minimumTerm, optionalJoin); + return Objects.hash(sourceNode, compatibilityVersions, minimumTerm, optionalJoin); } @Override @@ -122,8 +129,8 @@ public String toString() { return "JoinRequest{" + "sourceNode=" + sourceNode - + ", transportVersion=" - + transportVersion + + ", compatibilityVersions=" + + compatibilityVersions + ", minimumTerm=" + minimumTerm + ", optionalJoin=" diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTask.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTask.java index 486627c9533c9..ac1c4e888e6ca 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTask.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinTask.java @@ -8,11 
+8,11 @@ package org.elasticsearch.cluster.coordination; -import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskListener; import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.collect.Iterators; import java.util.List; @@ -25,12 +25,12 @@ public record JoinTask(List nodeJoinTasks, boolean isBecomingMaste public static JoinTask singleNode( DiscoveryNode node, - TransportVersion transportVersion, + CompatibilityVersions compatibilityVersions, JoinReason reason, ActionListener listener, long term ) { - return new JoinTask(List.of(new NodeJoinTask(node, transportVersion, reason, listener)), false, term, null); + return new JoinTask(List.of(new NodeJoinTask(node, compatibilityVersions, reason, listener)), false, term, null); } public static JoinTask completingElection(Stream nodeJoinTaskStream, long term) { @@ -75,11 +75,21 @@ public JoinTask alsoRefreshState(ClusterState latestState) { return new JoinTask(nodeJoinTasks, isBecomingMaster, term, latestState); } - public record NodeJoinTask(DiscoveryNode node, TransportVersion transportVersion, JoinReason reason, ActionListener listener) { + public record NodeJoinTask( + DiscoveryNode node, + CompatibilityVersions compatibilityVersions, + JoinReason reason, + ActionListener listener + ) { - public NodeJoinTask(DiscoveryNode node, TransportVersion transportVersion, JoinReason reason, ActionListener listener) { + public NodeJoinTask( + DiscoveryNode node, + CompatibilityVersions compatibilityVersions, + JoinReason reason, + ActionListener listener + ) { this.node = Objects.requireNonNull(node); - this.transportVersion = Objects.requireNonNull(transportVersion); + this.compatibilityVersions = Objects.requireNonNull(compatibilityVersions); this.reason = reason; this.listener = listener; } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java index 57831d82f1052..248137f03fcaa 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/JoinValidationService.java @@ -375,7 +375,7 @@ public void run() { public String toString() { return cacheClearer + " after timeout"; } - }, cacheTimeout, ThreadPool.Names.CLUSTER_COORDINATION); + }, cacheTimeout, responseExecutor); } } catch (Exception e) { assert e instanceof EsRejectedExecutionException esre && esre.isExecutorShutdown() : e; diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/LagDetector.java b/server/src/main/java/org/elasticsearch/cluster/coordination/LagDetector.java index 5e21f7c693413..f2c45ebc6e748 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/LagDetector.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/LagDetector.java @@ -38,6 +38,7 @@ import java.util.List; import java.util.Map; import java.util.Set; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Supplier; @@ -65,6 +66,7 @@ public class LagDetector { private final LagListener lagListener; private final Supplier localNodeSupplier; private final ThreadPool threadPool; + private final Executor clusterCoordinationExecutor; private final Map 
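
For orientation, the versions travel JoinRequest -> JoinAccumulator -> JoinTask.NodeJoinTask before NodeJoinExecutor records them per node id. An illustrative call through the singleNode factory above (the local variable names are hypothetical):

    JoinTask task = JoinTask.singleNode(
        sender,                // DiscoveryNode that asked to join
        compatibilityVersions, // CompatibilityVersions carried by its JoinRequest
        joinReason,            // JoinReason from JoinReasonService
        joinListener,          // listener completed once the join is applied
        term                   // current master term
    );
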
appliedStateTrackersByNode = newConcurrentMap(); public LagDetector( @@ -74,6 +76,7 @@ public LagDetector( final Supplier localNodeSupplier ) { this.threadPool = threadPool; + this.clusterCoordinationExecutor = threadPool.executor(Names.CLUSTER_COORDINATION); this.clusterStateApplicationTimeout = CLUSTER_FOLLOWER_LAG_TIMEOUT_SETTING.get(settings); this.lagListener = lagListener; this.localNodeSupplier = localNodeSupplier; @@ -112,7 +115,7 @@ public void startLagDetector(final long version) { } else { logger.debug("starting lag detector for version {}: {}", version, laggingTrackers); - threadPool.scheduleUnlessShuttingDown(clusterStateApplicationTimeout, Names.CLUSTER_COORDINATION, new Runnable() { + threadPool.scheduleUnlessShuttingDown(clusterStateApplicationTimeout, clusterCoordinationExecutor, new Runnable() { @Override public void run() { laggingTrackers.forEach(t -> t.checkForLag(version)); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java b/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java index da8ff932a8aed..8a20e8e56d751 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/LeaderChecker.java @@ -19,6 +19,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Releasable; @@ -392,7 +393,7 @@ public void run() { public String toString() { return "scheduled check of leader " + leader; } - }, leaderCheckInterval, Names.SAME); + }, leaderCheckInterval, EsExecutors.DIRECT_EXECUTOR_SERVICE); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java index e9529f9cdca16..55cf6ea8a398d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Priority; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.index.IndexVersion; @@ -120,7 +121,7 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex } DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(newState.nodes()); - Map transportVersions = new HashMap<>(newState.transportVersions()); + Map compatibilityVersionsMap = new HashMap<>(newState.compatibilityVersions()); assert nodesBuilder.isLocalNodeElectedMaster(); @@ -139,18 +140,18 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex logger.debug("received a join request for an existing node [{}]", node); } else { try { - TransportVersion transportVersion = nodeJoinTask.transportVersion(); + CompatibilityVersions compatibilityVersions = nodeJoinTask.compatibilityVersions(); if (enforceVersionBarrier) { ensureVersionBarrier(node.getVersion(), minClusterNodeVersion); - 
ensureTransportVersionBarrier(transportVersion, transportVersions.values()); + ensureTransportVersionBarrier(compatibilityVersions, compatibilityVersionsMap.values()); } - blockForbiddenVersions(transportVersion); + blockForbiddenVersions(compatibilityVersions.transportVersion()); ensureNodesCompatibility(node.getVersion(), minClusterNodeVersion, maxClusterNodeVersion); // we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices // we have to reject nodes that don't support all indices we have in this cluster ensureIndexCompatibility(node.getMinIndexVersion(), node.getMaxIndexVersion(), initialState.getMetadata()); nodesBuilder.add(node); - transportVersions.put(node.getId(), transportVersion); + compatibilityVersionsMap.put(node.getId(), compatibilityVersions); nodesChanged = true; minClusterNodeVersion = Version.min(minClusterNodeVersion, node.getVersion()); maxClusterNodeVersion = Version.max(maxClusterNodeVersion, node.getVersion()); @@ -221,7 +222,7 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex } final ClusterState clusterStateWithNewNodesAndDesiredNodes = DesiredNodes.updateDesiredNodesStatusIfNeeded( - newState.nodes(nodesBuilder).transportVersions(transportVersions).build() + newState.nodes(nodesBuilder).compatibilityVersions(compatibilityVersionsMap).build() ); final ClusterState updatedState = allocationService.adaptAutoExpandReplicas(clusterStateWithNewNodesAndDesiredNodes); assert enforceVersionBarrier == false @@ -239,9 +240,9 @@ public ClusterState execute(BatchExecutionContext batchExecutionContex } } - @SuppressForbidden(reason = "maintaining ClusterState#transportVersions requires reading them") - private static Map getTransportVersions(ClusterState clusterState) { - return clusterState.transportVersions(); + @SuppressForbidden(reason = "maintaining ClusterState#compatibilityVersions requires reading them") + private static Map getCompatibilityVersions(ClusterState clusterState) { + return clusterState.compatibilityVersions(); } protected ClusterState.Builder becomeMasterAndTrimConflictingNodes( @@ -265,8 +266,9 @@ protected ClusterState.Builder becomeMasterAndTrimConflictingNodes( assert currentState.term() < term : term + " vs " + currentState; DiscoveryNodes currentNodes = currentState.nodes(); DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(currentNodes); - Map transportVersions = new HashMap<>(getTransportVersions(currentState)); + Map compatibilityVersions = new HashMap<>(getCompatibilityVersions(currentState)); nodesBuilder.masterNodeId(currentState.nodes().getLocalNodeId()); + nodesBuilder.resetNodeLeftGeneration(); for (final var taskContext : taskContexts) { for (final var joiningNode : taskContext.getTask().nodes()) { @@ -274,7 +276,7 @@ protected ClusterState.Builder becomeMasterAndTrimConflictingNodes( if (nodeWithSameId != null && nodeWithSameId.equals(joiningNode) == false) { logger.debug("removing existing node [{}], which conflicts with incoming join from [{}]", nodeWithSameId, joiningNode); nodesBuilder.remove(nodeWithSameId.getId()); - transportVersions.remove(nodeWithSameId.getId()); + compatibilityVersions.remove(nodeWithSameId.getId()); } final DiscoveryNode nodeWithSameAddress = currentNodes.findByAddress(joiningNode.getAddress()); if (nodeWithSameAddress != null && nodeWithSameAddress.equals(joiningNode) == false) { @@ -284,7 +286,7 @@ protected ClusterState.Builder becomeMasterAndTrimConflictingNodes( joiningNode ); 
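
The barrier rewritten above boils down to: a joining node may not be older, in transport-version terms, than the oldest node already in the cluster, because the cluster-wide minimum must never move backwards. The same pipeline as the hunk, annotated with a concrete scenario:

    // e.g. existing nodes at 8.8.0 and 8.9.0 -> the minimum is 8.8.0;
    // a joiner reporting 8.7.0 is rejected.
    TransportVersion minClusterTransportVersion = existingTransportVersions.stream()
        .map(CompatibilityVersions::transportVersion)
        .min(Comparator.naturalOrder())
        .orElse(TransportVersion.current()); // empty cluster: fall back to our own version
    if (joiningCompatibilityVersions.transportVersion().before(minClusterTransportVersion)) {
        throw new IllegalStateException("joining node's transport version is below the cluster minimum"); // message abbreviated
    }
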
nodesBuilder.remove(nodeWithSameAddress.getId()); - transportVersions.remove(nodeWithSameAddress.getId()); + compatibilityVersions.remove(nodeWithSameAddress.getId()); } } } @@ -293,7 +295,7 @@ protected ClusterState.Builder becomeMasterAndTrimConflictingNodes( // or removed by us above ClusterState tmpState = ClusterState.builder(currentState) .nodes(nodesBuilder) - .transportVersions(transportVersions) + .compatibilityVersions(compatibilityVersions) .blocks(ClusterBlocks.builder().blocks(currentState.blocks()).removeGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_ID)) .metadata( Metadata.builder(currentState.metadata()) @@ -393,16 +395,17 @@ public static void ensureNodesCompatibility(Version joiningNodeVersion, Version * to ensure that the minimum transport version of the cluster doesn't go backwards. **/ static void ensureTransportVersionBarrier( - TransportVersion joiningTransportVersion, - Collection existingTransportVersions + CompatibilityVersions joiningCompatibilityVersions, + Collection existingTransportVersions ) { TransportVersion minClusterTransportVersion = existingTransportVersions.stream() + .map(CompatibilityVersions::transportVersion) .min(Comparator.naturalOrder()) .orElse(TransportVersion.current()); - if (joiningTransportVersion.before(minClusterTransportVersion)) { + if (joiningCompatibilityVersions.transportVersion().before(minClusterTransportVersion)) { throw new IllegalStateException( "node with transport version [" - + joiningTransportVersion + + joiningCompatibilityVersions.transportVersion() + "] may not join a cluster with minimum transport version [" + minClusterTransportVersion + "]" diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java index 995066106e8ca..68c611aeef9a6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; @@ -18,6 +17,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.MasterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; @@ -50,23 +50,23 @@ public NodeLeftExecutor(AllocationService allocationService) { this.allocationService = allocationService; } - @SuppressForbidden(reason = "maintaining ClusterState#transportVersions requires reading them") - private static Map getTransportVersions(ClusterState clusterState) { - return clusterState.transportVersions(); + @SuppressForbidden(reason = "maintaining ClusterState#compatibilityVersions requires reading them") + private static Map getCompatibilityVersions(ClusterState clusterState) { + return clusterState.compatibilityVersions(); } @Override public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { ClusterState initialState = batchExecutionContext.initialState(); DiscoveryNodes.Builder remainingNodesBuilder = 
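
NodeLeftExecutor, continuing below, enforces the mirror-image invariant: the per-node CompatibilityVersions map shrinks in lockstep with the node set, otherwise a departed node's stale entry could pin the cluster-wide minimum. A sketch of the bookkeeping, with departedNodes as a hypothetical stand-in for the batched removal tasks:

    Map<String, CompatibilityVersions> compatibilityVersions = new HashMap<>(getCompatibilityVersions(initialState));
    for (DiscoveryNode departed : departedNodes) {
        remainingNodesBuilder.remove(departed.getId()); // drop from the node set
        compatibilityVersions.remove(departed.getId()); // and from the versions map, keyed the same way
    }
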
DiscoveryNodes.builder(initialState.nodes()); - Map transportVersions = new HashMap<>(getTransportVersions(initialState)); + Map compatibilityVersions = new HashMap<>(getCompatibilityVersions(initialState)); boolean removed = false; for (final var taskContext : batchExecutionContext.taskContexts()) { final var task = taskContext.getTask(); final String reason; if (initialState.nodes().nodeExists(task.node())) { remainingNodesBuilder.remove(task.node()); - transportVersions.remove(task.node().getId()); + compatibilityVersions.remove(task.node().getId()); removed = true; reason = task.reason(); } else { @@ -89,7 +89,7 @@ public ClusterState execute(BatchExecutionContext batchExecutionContext) t try (var ignored = batchExecutionContext.dropHeadersContext()) { // suppress deprecation warnings e.g. from reroute() - final var remainingNodesClusterState = remainingNodesClusterState(initialState, remainingNodesBuilder, transportVersions); + final var remainingNodesClusterState = remainingNodesClusterState(initialState, remainingNodesBuilder, compatibilityVersions); final var ptasksDisassociatedState = PersistentTasksCustomMetadata.disassociateDeadNodes(remainingNodesClusterState); return allocationService.disassociateDeadNodes( ptasksDisassociatedState, @@ -105,9 +105,9 @@ public ClusterState execute(BatchExecutionContext batchExecutionContext) t protected ClusterState remainingNodesClusterState( ClusterState currentState, DiscoveryNodes.Builder remainingNodesBuilder, - Map transportVersions + Map compatibilityVersions ) { - return ClusterState.builder(currentState).nodes(remainingNodesBuilder).transportVersions(transportVersions).build(); + return ClusterState.builder(currentState).nodes(remainingNodesBuilder).compatibilityVersions(compatibilityVersions).build(); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/PeersResponse.java b/server/src/main/java/org/elasticsearch/cluster/coordination/PeersResponse.java index 6afd06018939f..e00edafba44a5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/PeersResponse.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/PeersResponse.java @@ -32,7 +32,7 @@ public PeersResponse(Optional masterNode, List kno public PeersResponse(StreamInput in) throws IOException { masterNode = Optional.ofNullable(in.readOptionalWriteable(DiscoveryNode::new)); - knownPeers = in.readImmutableList(DiscoveryNode::new); + knownPeers = in.readCollectionAsImmutableList(DiscoveryNode::new); term = in.readLong(); assert masterNode.isPresent() == false || knownPeers.isEmpty(); } @@ -40,7 +40,7 @@ public PeersResponse(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(masterNode.orElse(null)); - out.writeList(knownPeers); + out.writeCollection(knownPeers); out.writeLong(term); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java index 4cf5cd6787ab6..45079b2bccd60 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/PublicationTransportHandler.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import 
org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRunnable; import org.elasticsearch.cluster.ClusterState; @@ -89,7 +90,7 @@ public class PublicationTransportHandler { TransportRequestOptions.Type.STATE ); - public static final TransportVersion INCLUDES_LAST_COMMITTED_DATA_VERSION = TransportVersion.V_8_6_0; + public static final TransportVersion INCLUDES_LAST_COMMITTED_DATA_VERSION = TransportVersions.V_8_6_0; private final SerializationStatsTracker serializationStatsTracker = new SerializationStatsTracker(); diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java index 42fbd0b294a8a..5dea832cc6ad3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/StableMasterHealthIndicatorService.java @@ -102,6 +102,11 @@ public String name() { return NAME; } + @Override + public boolean isPreflight() { + return true; + } + @Override public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) { CoordinationDiagnosticsService.CoordinationDiagnosticsResult coordinationDiagnosticsResult = coordinationDiagnosticsService diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java index d2f9f18466aa1..c6463949f774f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ValidateJoinRequest.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster.coordination; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.bytes.BytesReference; @@ -28,7 +29,7 @@ public class ValidateJoinRequest extends TransportRequest { public ValidateJoinRequest(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { // recent versions send a BytesTransportRequest containing a compressed representation of the state final var bytes = in.readReleasableBytesReference(); final var version = in.getTransportVersion(); @@ -67,7 +68,7 @@ public ValidateJoinRequest(ClusterState state) { @Override public void writeTo(StreamOutput out) throws IOException { - assert out.getTransportVersion().before(TransportVersion.V_8_3_0); + assert out.getTransportVersion().before(TransportVersions.V_8_3_0); super.writeTo(out); stateSupplier.get().writeTo(out); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatService.java b/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatService.java index 6555054fd9973..0ea515012a190 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatService.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/stateless/StoreHeartbeatService.java @@ -123,7 +123,7 @@ private class HeartbeatTask extends ActionRunnable { assert 0 < heartbeatTerm : heartbeatTerm; this.heartbeatTerm = heartbeatTerm; this.rerunListener = 
listener.delegateFailureAndWrap( - (l, scheduleDelay) -> threadPool.schedule(HeartbeatTask.this, scheduleDelay, ThreadPool.Names.GENERIC) + (l, scheduleDelay) -> threadPool.schedule(HeartbeatTask.this, scheduleDelay, threadPool.generic()) ); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplateMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplateMetadata.java index 67582a5e27559..71966deb076b6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplateMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplateMetadata.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.NamedDiff; @@ -87,12 +88,12 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_7_0; + return TransportVersions.V_7_7_0; } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(this.componentTemplates, StreamOutput::writeString, (stream, val) -> val.writeTo(stream)); + out.writeMap(this.componentTemplates, StreamOutput::writeWriteable); } public static ComponentTemplateMetadata fromXContent(XContentParser parser) throws IOException { @@ -164,7 +165,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_7_0; + return TransportVersions.V_7_7_0; } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java index c465bc492ed06..bd745e7ff4ea6 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplate.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SimpleDiffable; @@ -161,20 +161,20 @@ public ComposableIndexTemplate( } public ComposableIndexTemplate(StreamInput in) throws IOException { - this.indexPatterns = in.readStringList(); + this.indexPatterns = in.readStringCollectionAsList(); if (in.readBoolean()) { this.template = new Template(in); } else { this.template = null; } - this.componentTemplates = in.readOptionalStringList(); + this.componentTemplates = in.readOptionalStringCollectionAsList(); this.priority = in.readOptionalVLong(); this.version = in.readOptionalVLong(); this.metadata = in.readMap(); this.dataStreamTemplate = in.readOptionalWriteable(DataStreamTemplate::new); this.allowAutoCreate = in.readOptionalBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { - this.ignoreMissingComponentTemplates = in.readOptionalStringList(); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { + this.ignoreMissingComponentTemplates = in.readOptionalStringCollectionAsList(); } else { this.ignoreMissingComponentTemplates = null; } @@ -265,7 +265,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeGenericMap(this.metadata); 
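
A recurring cleanup in the metadata classes here: explicit writer lambdas become method references, and the String key writer is dropped where the two-argument writeMap overload (as these replacements imply) writes String keys itself. Before and after:

    // before: explicit key writer plus a lambda per Writeable value
    out.writeMap(templates, StreamOutput::writeString, (stream, val) -> val.writeTo(stream));
    // after: any Writeable value serializes via a method reference
    out.writeMap(templates, StreamOutput::writeWriteable);
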
out.writeOptionalWriteable(dataStreamTemplate); out.writeOptionalBoolean(allowAutoCreate); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { out.writeOptionalStringCollection(ignoreMissingComponentTemplates); } } @@ -394,12 +394,12 @@ public DataStreamTemplate(boolean hidden, boolean allowCustomRouting) { DataStreamTemplate(StreamInput in) throws IOException { hidden = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { allowCustomRouting = in.readBoolean(); } else { allowCustomRouting = false; } - if (in.getTransportVersion().between(TransportVersion.V_8_1_0, TransportVersion.V_8_3_0)) { + if (in.getTransportVersion().between(TransportVersions.V_8_1_0, TransportVersions.V_8_3_0)) { // Accidentally included index_mode in the binary node-to-node protocol in previous releases. // (index_mode has since been removed; it was part of the codebase while tsdb was behind a feature flag) // (index_mode was behind a feature flag in the xcontent parser, so it could never actually be used) @@ -438,10 +438,10 @@ public boolean isAllowCustomRouting() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(hidden); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { out.writeBoolean(allowCustomRouting); } - if (out.getTransportVersion().between(TransportVersion.V_8_1_0, TransportVersion.V_8_3_0)) { + if (out.getTransportVersion().between(TransportVersions.V_8_1_0, TransportVersions.V_8_3_0)) { // See comment in constructor. out.writeBoolean(false); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateMetadata.java index 6da5bf26e427f..8fd606048d539 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateMetadata.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.NamedDiff; @@ -92,12 +93,12 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_7_0; + return TransportVersions.V_7_7_0; } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(this.indexTemplates, StreamOutput::writeString, (outstream, val) -> val.writeTo(outstream)); + out.writeMap(this.indexTemplates, StreamOutput::writeWriteable); } @Override @@ -165,7 +166,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_7_0; + return TransportVersions.V_7_7_0; } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java index 40142a2246b8c..c5cf0b29f6273 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStream.java @@ -11,7 +11,7 @@ import org.apache.lucene.index.LeafReader; import 
org.apache.lucene.index.PointValues; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.DocWriteRequest; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; @@ -144,6 +144,7 @@ public DataStream( ) { this.name = name; this.indices = List.copyOf(indices); + assert indices.isEmpty() == false; this.generation = generation; this.metadata = metadata; assert system == false || hidden; // system indices must be hidden @@ -786,15 +787,15 @@ public DataStream(StreamInput in) throws IOException { in.readBoolean(), in.readBoolean(), in.readBoolean(), - in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0) ? in.readBoolean() : false, - in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0) ? in.readOptionalEnum(IndexMode.class) : null, - in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010) ? in.readOptionalWriteable(DataStreamLifecycle::new) : null + in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0) ? in.readBoolean() : false, + in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0) ? in.readOptionalEnum(IndexMode.class) : null, + in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) ? in.readOptionalWriteable(DataStreamLifecycle::new) : null ); } static List readIndices(StreamInput in) throws IOException { in.readString(); // timestamp field, which is always @timestamp - return in.readImmutableList(Index::new); + return in.readCollectionAsImmutableList(Index::new); } public static Diff readDiffFrom(StreamInput in) throws IOException { @@ -805,19 +806,19 @@ public static Diff readDiffFrom(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeString(TIMESTAMP_FIELD_NAME); - out.writeList(indices); + out.writeCollection(indices); out.writeVLong(generation); out.writeGenericMap(metadata); out.writeBoolean(hidden); out.writeBoolean(replicated); out.writeBoolean(system); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { out.writeBoolean(allowCustomRouting); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { out.writeOptionalEnum(indexMode); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeOptionalWriteable(lifecycle); } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAlias.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAlias.java index be8613f5790ba..4c86a91ee82f2 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAlias.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamAlias.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.ParsingException; @@ -165,9 +165,9 @@ private static Map decompress(CompressedXContent filter) { public DataStreamAlias(StreamInput in) throws IOException { this.name = in.readString(); - this.dataStreams = 
in.readStringList(); + this.dataStreams = in.readStringCollectionAsList(); this.writeDataStream = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { this.dataStreamToFilterMap = in.readMap(CompressedXContent::readCompressedString); } else { this.dataStreamToFilterMap = new HashMap<>(); @@ -398,8 +398,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeStringCollection(dataStreams); out.writeOptionalString(writeDataStream); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { - out.writeMap(dataStreamToFilterMap, StreamOutput::writeString, (out1, filter) -> filter.writeTo(out1)); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { + out.writeMap(dataStreamToFilterMap, StreamOutput::writeWriteable); } else { if (dataStreamToFilterMap.isEmpty()) { out.writeBoolean(false); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java index 0d8a3c6f8e3ba..b768b468204fc 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamLifecycle.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.cluster.Diff; @@ -46,7 +47,7 @@ public class DataStreamLifecycle implements SimpleDiffable, ToXContentObject { // Versions over the wire - public static final TransportVersion ADDED_ENABLED_FLAG_VERSION = TransportVersion.V_8_500_057; + public static final TransportVersion ADDED_ENABLED_FLAG_VERSION = TransportVersions.V_8_500_057; public static final Setting CLUSTER_LIFECYCLE_DEFAULT_ROLLOVER_SETTING = new Setting<>( "cluster.lifecycle.default.rollover", @@ -172,10 +173,10 @@ public int hashCode() { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeOptionalWriteable(dataRetention); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_026)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_026)) { out.writeOptionalWriteable(downsampling); } if (out.getTransportVersion().onOrAfter(ADDED_ENABLED_FLAG_VERSION)) { @@ -184,12 +185,12 @@ public void writeTo(StreamOutput out) throws IOException { } public DataStreamLifecycle(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { dataRetention = in.readOptionalWriteable(Retention::read); } else { dataRetention = null; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_026)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_026)) { downsampling = in.readOptionalWriteable(Downsampling::read); } else { downsampling = null; @@ -414,12 +415,12 @@ public String toString() { } public static Downsampling read(StreamInput in) throws IOException { - return new Downsampling(in.readOptionalList(Round::read)); + return new 
Downsampling(in.readOptionalCollectionAsList(Round::read)); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeOptionalCollection(rounds, (o, v) -> v.writeTo(o)); + out.writeOptionalCollection(rounds, StreamOutput::writeWriteable); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamMetadata.java index 17d2d2d1109cc..fef9ebe993a4d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DataStreamMetadata.java @@ -10,6 +10,7 @@ import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.NamedDiff; @@ -216,13 +217,13 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_7_0; + return TransportVersions.V_7_7_0; } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(this.dataStreams, StreamOutput::writeString, (stream, val) -> val.writeTo(stream)); - out.writeMap(this.dataStreamAliases, StreamOutput::writeString, (stream, val) -> val.writeTo(stream)); + out.writeMap(this.dataStreams, StreamOutput::writeWriteable); + out.writeMap(this.dataStreamAliases, StreamOutput::writeWriteable); } public static DataStreamMetadata fromXContent(XContentParser parser) throws IOException { @@ -313,7 +314,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_7_0; + return TransportVersions.V_7_7_0; } } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java index 0a7eb6fa954c2..de46e04ea6215 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNode.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; @@ -40,7 +41,7 @@ public final class DesiredNode implements Writeable, ToXContentObject, Comparable { public static final Version RANGE_FLOAT_PROCESSORS_SUPPORT_VERSION = Version.V_8_3_0; - public static final TransportVersion RANGE_FLOAT_PROCESSORS_SUPPORT_TRANSPORT_VERSION = TransportVersion.V_8_3_0; + public static final TransportVersion RANGE_FLOAT_PROCESSORS_SUPPORT_TRANSPORT_VERSION = TransportVersions.V_8_3_0; private static final ParseField SETTINGS_FIELD = new ParseField("settings"); private static final ParseField PROCESSORS_FIELD = new ParseField("processors"); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java index 036b31fa65be5..9c5710b91966c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodeWithStatus.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.metadata; import 
org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -31,7 +32,7 @@ public record DesiredNodeWithStatus(DesiredNode desiredNode, Status status) ToXContentObject, Comparable { - private static final TransportVersion STATUS_TRACKING_SUPPORT_VERSION = TransportVersion.V_8_4_0; + private static final TransportVersion STATUS_TRACKING_SUPPORT_VERSION = TransportVersions.V_8_4_0; private static final ParseField STATUS_FIELD = new ParseField("status"); public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodes.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodes.java index cadc57c0c2a2e..e572f20557c79 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodes.java @@ -163,7 +163,7 @@ private DesiredNodes(String historyID, long version, Map implements Metadata.Custom { - private static final TransportVersion MIN_SUPPORTED_VERSION = TransportVersion.V_8_1_0; + private static final TransportVersion MIN_SUPPORTED_VERSION = TransportVersions.V_8_1_0; public static final String TYPE = "desired_nodes"; public static final DesiredNodesMetadata EMPTY = new DesiredNodesMetadata((DesiredNodes) null); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java index a26bf8189117e..7c74014a1da2e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DiffableStringMap.java @@ -88,7 +88,7 @@ public Diff diff(DiffableStringMap previousState) { } public static Diff readDiffFrom(StreamInput in) throws IOException { - final List deletes = in.readStringList(); + final List deletes = in.readStringCollectionAsList(); final Map upserts = in.readMap(StreamInput::readString); return getDiff(deletes, upserts); } @@ -135,7 +135,7 @@ public Map getUpserts() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(deletes); - out.writeMap(upserts, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(upserts, StreamOutput::writeString); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java index d244df15579b6..140eeb2e240ff 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.common.io.stream.StreamInput; @@ -78,7 +79,7 @@ private IndexGraveyard(final List list) { } public IndexGraveyard(final StreamInput in) throws IOException { - this.tombstones = in.readImmutableList(Tombstone::new); + this.tombstones = in.readCollectionAsImmutableList(Tombstone::new); } @Override @@ -88,7 +89,7 @@ public String getWriteableName() { @Override public TransportVersion 
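
The read-side renames in these hunks are mechanical but deliberate: readStringList -> readStringCollectionAsList, readImmutableList -> readCollectionAsImmutableList, readSet -> readCollectionAsSet, so the materialized collection type is visible at the call site. The round-trip idiom, with IndexGraveyard's Tombstone standing in for any Writeable element:

    // write: length-prefixed sequence of Writeables
    out.writeCollection(tombstones);
    // read: rebuilt in order as an immutable List via the element's stream constructor
    List<Tombstone> restored = in.readCollectionAsImmutableList(Tombstone::new);
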
getMinimalSupportedVersion() { - return TransportVersion.MINIMUM_COMPATIBLE; + return TransportVersions.MINIMUM_COMPATIBLE; } @Override @@ -141,7 +142,7 @@ public String toString() { @Override public void writeTo(final StreamOutput out) throws IOException { - out.writeList(tombstones); + out.writeCollection(tombstones); } @Override @@ -255,7 +256,7 @@ public static final class IndexGraveyardDiff implements NamedDiff> settings() { public static final String INDEX_STATE_FILE_PREFIX = "state-"; - static final TransportVersion SYSTEM_INDEX_FLAG_ADDED = TransportVersion.V_7_10_0; + static final TransportVersion SYSTEM_INDEX_FLAG_ADDED = TransportVersions.V_7_10_0; - static final TransportVersion STATS_AND_FORECAST_ADDED = TransportVersion.V_8_6_0; + static final TransportVersion STATS_AND_FORECAST_ADDED = TransportVersions.V_8_6_0; private final int routingNumShards; private final int routingFactor; @@ -1227,6 +1228,8 @@ public Index getResizeSourceIndex() { public static final String INDEX_DOWNSAMPLE_SOURCE_UUID_KEY = "index.downsample.source.uuid"; public static final String INDEX_DOWNSAMPLE_SOURCE_NAME_KEY = "index.downsample.source.name"; + public static final String INDEX_DOWNSAMPLE_ORIGIN_NAME_KEY = "index.downsample.origin.name"; + public static final String INDEX_DOWNSAMPLE_ORIGIN_UUID_KEY = "index.downsample.origin.uuid"; public static final String INDEX_DOWNSAMPLE_STATUS_KEY = "index.downsample.status"; public static final Setting INDEX_DOWNSAMPLE_SOURCE_UUID = Setting.simpleString( @@ -1240,6 +1243,18 @@ public Index getResizeSourceIndex() { Property.PrivateIndex ); + public static final Setting INDEX_DOWNSAMPLE_ORIGIN_NAME = Setting.simpleString( + INDEX_DOWNSAMPLE_ORIGIN_NAME_KEY, + Property.IndexScope, + Property.PrivateIndex + ); + + public static final Setting INDEX_DOWNSAMPLE_ORIGIN_UUID = Setting.simpleString( + INDEX_DOWNSAMPLE_ORIGIN_UUID_KEY, + Property.IndexScope, + Property.PrivateIndex + ); + public enum DownsampleTaskStatus { UNKNOWN, STARTED, @@ -1430,7 +1445,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws return builder; } - private static final TransportVersion SETTING_DIFF_VERSION = TransportVersion.V_8_5_0; + private static final TransportVersion SETTING_DIFF_VERSION = TransportVersions.V_8_5_0; private static class IndexMetadataDiff implements Diff { @@ -1511,7 +1526,7 @@ private static class IndexMetadataDiff implements Diff { version = in.readLong(); mappingVersion = in.readVLong(); settingsVersion = in.readVLong(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_2_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) { aliasesVersion = in.readVLong(); } else { aliasesVersion = 1; @@ -1562,7 +1577,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(version); out.writeVLong(mappingVersion); out.writeVLong(settingsVersion); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_2_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) { out.writeVLong(aliasesVersion); } out.writeByte(state.id); @@ -1635,7 +1650,7 @@ public static IndexMetadata readFrom(StreamInput in, @Nullable Function v.writeTo(o)); + out.writeMap(customData, StreamOutput::writeWriteable); out.writeMap( inSyncAllocationIds, StreamOutput::writeVInt, @@ -2633,7 +2648,7 @@ private static void handleLegacyMapping(Builder builder, Map map } /** - * Return the {@link Version} of Elasticsearch that has been used to create an index given its settings. 
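
The two new origin settings mirror the existing index.downsample.source.* pair: index-scoped, private strings that only the downsampling machinery writes. Declaring and reading such a setting (the read below is illustrative usage, not a hunk from this PR):

    public static final Setting<String> INDEX_DOWNSAMPLE_ORIGIN_NAME = Setting.simpleString(
        INDEX_DOWNSAMPLE_ORIGIN_NAME_KEY,   // "index.downsample.origin.name"
        Property.IndexScope,                // stored per index
        Property.PrivateIndex               // not directly settable by users
    );
    String originName = INDEX_DOWNSAMPLE_ORIGIN_NAME.get(indexMetadata.getSettings());
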
+ * Return the {@link IndexVersion} of Elasticsearch that has been used to create an index given its settings. * * @throws IllegalArgumentException if the given index settings doesn't contain a value for the key * {@value IndexMetadata#SETTING_VERSION_CREATED} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java index dfc05552d3e72..31bfc937a1ed3 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexTemplateMetadata.java @@ -181,7 +181,7 @@ public int hashCode() { public static IndexTemplateMetadata readFrom(StreamInput in) throws IOException { Builder builder = new Builder(in.readString()); builder.order(in.readInt()); - builder.patterns(in.readStringList()); + builder.patterns(in.readStringCollectionAsList()); builder.settings(Settings.readSettingsFromStream(in)); int mappingsSize = in.readVInt(); for (int i = 0; i < mappingsSize; i++) { @@ -206,7 +206,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeInt(order); out.writeStringCollection(patterns); settings.writeTo(out); - out.writeMap(mappings, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + out.writeMap(mappings, StreamOutput::writeWriteable); out.writeCollection(aliases.values()); out.writeOptionalVInt(version); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ItemUsage.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ItemUsage.java index 6b97fd0ad2b02..2e0c3194fb9ea 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ItemUsage.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ItemUsage.java @@ -47,17 +47,17 @@ public ItemUsage( public ItemUsage(StreamInput in) throws IOException { if (in.readBoolean()) { - this.indices = in.readSet(StreamInput::readString); + this.indices = in.readCollectionAsSet(StreamInput::readString); } else { this.indices = null; } if (in.readBoolean()) { - this.dataStreams = in.readSet(StreamInput::readString); + this.dataStreams = in.readCollectionAsSet(StreamInput::readString); } else { this.dataStreams = null; } if (in.readBoolean()) { - this.composableTemplates = in.readSet(StreamInput::readString); + this.composableTemplates = in.readCollectionAsSet(StreamInput::readString); } else { this.composableTemplates = null; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java index d102624ccb06e..ab4a2ed662b56 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MappingMetadata.java @@ -9,7 +9,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.compress.CompressedXContent; @@ -75,7 +75,7 @@ public MappingMetadata(String type, Map mapping) { } public static void writeMappingMetadata(StreamOutput out, Map mappings) throws IOException { - out.writeMap(mappings, StreamOutput::writeString, out.getTransportVersion().before(TransportVersion.V_8_0_0) ? 
(o, v) -> { + out.writeMap(mappings, out.getTransportVersion().before(TransportVersions.V_8_0_0) ? (o, v) -> { o.writeVInt(v == EMPTY_MAPPINGS ? 0 : 1); if (v != EMPTY_MAPPINGS) { o.writeString(MapperService.SINGLE_MAPPING_NAME); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 14ca6eee13149..a61761497da5b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -12,6 +12,7 @@ import org.apache.logging.log4j.Logger; import org.apache.lucene.util.CollectionUtil; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.Diffable; @@ -1233,8 +1234,7 @@ public Map templatesV2() { } public boolean isTimeSeriesTemplate(ComposableIndexTemplate indexTemplate) { - var template = indexTemplate.template(); - if (indexTemplate.getDataStreamTemplate() == null || template == null) { + if (indexTemplate.getDataStreamTemplate() == null) { return false; } @@ -1467,7 +1467,7 @@ public Map getMappingsByHash() { private static class MetadataDiff implements Diff { - private static final TransportVersion NOOP_METADATA_DIFF_VERSION = TransportVersion.V_8_5_0; + private static final TransportVersion NOOP_METADATA_DIFF_VERSION = TransportVersions.V_8_5_0; private static final TransportVersion NOOP_METADATA_DIFF_SAFE_VERSION = PublicationTransportHandler.INCLUDES_LAST_COMMITTED_DATA_VERSION; @@ -1535,7 +1535,7 @@ private MetadataDiff(StreamInput in) throws IOException { coordinationMetadata = new CoordinationMetadata(in); transientSettings = Settings.readSettingsFromStream(in); persistentSettings = Settings.readSettingsFromStream(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { hashesOfConsistentSettings = DiffableStringMap.readDiffFrom(in); } else { hashesOfConsistentSettings = DiffableStringMap.DiffableStringMapDiff.EMPTY; @@ -1543,7 +1543,7 @@ private MetadataDiff(StreamInput in) throws IOException { indices = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), INDEX_METADATA_DIFF_VALUE_READER); templates = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), TEMPLATES_DIFF_VALUE_READER); customs = DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { reservedStateMetadata = DiffableUtils.readJdkMapDiff( in, DiffableUtils.getStringKeySerializer(), @@ -1572,13 +1572,13 @@ public void writeTo(StreamOutput out) throws IOException { coordinationMetadata.writeTo(out); transientSettings.writeTo(out); persistentSettings.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { hashesOfConsistentSettings.writeTo(out); } indices.writeTo(out); templates.writeTo(out); customs.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { reservedStateMetadata.writeTo(out); } } @@ -1611,7 +1611,7 @@ public Metadata 
apply(Metadata part) { } } - public static final TransportVersion MAPPINGS_AS_HASH_VERSION = TransportVersion.V_8_1_0; + public static final TransportVersion MAPPINGS_AS_HASH_VERSION = TransportVersions.V_8_1_0; public static Metadata readFrom(StreamInput in) throws IOException { Builder builder = new Builder(); @@ -1621,7 +1621,7 @@ public static Metadata readFrom(StreamInput in) throws IOException { builder.coordinationMetadata(new CoordinationMetadata(in)); builder.transientSettings(readSettingsFromStream(in)); builder.persistentSettings(readSettingsFromStream(in)); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { builder.hashesOfConsistentSettings(DiffableStringMap.readFrom(in)); } final Function mappingLookup; @@ -1648,7 +1648,7 @@ public static Metadata readFrom(StreamInput in) throws IOException { Custom customIndexMetadata = in.readNamedWriteable(Custom.class); builder.putCustom(customIndexMetadata.getWriteableName(), customIndexMetadata); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { int reservedStateSize = in.readVInt(); for (int i = 0; i < reservedStateSize; i++) { builder.put(ReservedStateMetadata.readFrom(in)); @@ -1665,7 +1665,7 @@ public void writeTo(StreamOutput out) throws IOException { coordinationMetadata.writeTo(out); transientSettings.writeTo(out); persistentSettings.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { hashesOfConsistentSettings.writeTo(out); } // Starting in #MAPPINGS_AS_HASH_VERSION we write the mapping metadata first and then write the indices without metadata so that @@ -1680,7 +1680,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeCollection(templates.values()); VersionedNamedWriteable.writeVersionedWritables(out, customs); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { out.writeCollection(reservedStateMetadata.values()); } } @@ -2463,29 +2463,34 @@ static SortedMap buildIndicesLookup( } SortedMap indicesLookup = new TreeMap<>(); Map indexToDataStreamLookup = new HashMap<>(); - final var dataStreams = dataStreamMetadata.dataStreams(); - for (DataStreamAlias alias : dataStreamMetadata.getDataStreamAliases().values()) { - IndexAbstraction existing = indicesLookup.put(alias.getName(), makeDsAliasAbstraction(dataStreams, alias)); - assert existing == null : "duplicate data stream alias for " + alias.getName(); - } - for (DataStream dataStream : dataStreams.values()) { - assert dataStream.getIndices().isEmpty() == false; + collectDataStreams(dataStreamMetadata, indicesLookup, indexToDataStreamLookup); - IndexAbstraction existing = indicesLookup.put(dataStream.getName(), dataStream); - assert existing == null : "duplicate data stream for " + dataStream.getName(); + Map> aliasToIndices = new HashMap<>(); + collectIndices(indices, indexToDataStreamLookup, indicesLookup, aliasToIndices); + collectAliases(aliasToIndices, indicesLookup); - for (Index i : dataStream.getIndices()) { - indexToDataStreamLookup.put(i.getName(), dataStream); - } + return Collections.unmodifiableSortedMap(indicesLookup); + } + + private static void collectAliases(Map> aliasToIndices, Map indicesLookup) { + for (var entry : aliasToIndices.entrySet()) { + AliasMetadata alias = 
entry.getValue().get(0).getAliases().get(entry.getKey()); + IndexAbstraction existing = indicesLookup.put(entry.getKey(), new IndexAbstraction.Alias(alias, entry.getValue())); + assert existing == null : "duplicate for " + entry.getKey(); } + } - Map> aliasToIndices = new HashMap<>(); + private static void collectIndices( + Map indices, + Map indexToDataStreamLookup, + Map indicesLookup, + Map> aliasToIndices + ) { for (var entry : indices.entrySet()) { final String name = entry.getKey(); final IndexMetadata indexMetadata = entry.getValue(); final DataStream parent = indexToDataStreamLookup.get(name); - assert parent == null || parent.getIndices().stream().anyMatch(index -> name.equals(index.getName())) - : "Expected data stream [" + parent.getName() + "] to contain index " + indexMetadata.getIndex(); + assert assertContainsIndexIfDataStream(parent, indexMetadata); IndexAbstraction existing = indicesLookup.put(name, new ConcreteIndex(indexMetadata, parent)); assert existing == null : "duplicate for " + indexMetadata.getIndex(); @@ -2494,14 +2499,33 @@ static SortedMap buildIndicesLookup( aliasIndices.add(indexMetadata); } } + } - for (var entry : aliasToIndices.entrySet()) { - AliasMetadata alias = entry.getValue().get(0).getAliases().get(entry.getKey()); - IndexAbstraction existing = indicesLookup.put(entry.getKey(), new IndexAbstraction.Alias(alias, entry.getValue())); - assert existing == null : "duplicate for " + entry.getKey(); + private static boolean assertContainsIndexIfDataStream(DataStream parent, IndexMetadata indexMetadata) { + assert parent == null + || parent.getIndices().stream().anyMatch(index -> indexMetadata.getIndex().getName().equals(index.getName())) + : "Expected data stream [" + parent.getName() + "] to contain index " + indexMetadata.getIndex(); + return true; + } + + private static void collectDataStreams( + DataStreamMetadata dataStreamMetadata, + Map indicesLookup, + Map indexToDataStreamLookup + ) { + final var dataStreams = dataStreamMetadata.dataStreams(); + for (DataStreamAlias alias : dataStreamMetadata.getDataStreamAliases().values()) { + IndexAbstraction existing = indicesLookup.put(alias.getName(), makeDsAliasAbstraction(dataStreams, alias)); + assert existing == null : "duplicate data stream alias for " + alias.getName(); } + for (DataStream dataStream : dataStreams.values()) { + IndexAbstraction existing = indicesLookup.put(dataStream.getName(), dataStream); + assert existing == null : "duplicate data stream for " + dataStream.getName(); - return Collections.unmodifiableSortedMap(indicesLookup); + for (Index i : dataStream.getIndices()) { + indexToDataStreamLookup.put(i.getName(), dataStream); + } + } } private static IndexAbstraction.Alias makeDsAliasAbstraction(Map dataStreams, DataStreamAlias alias) { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java index 8423a5ad37334..3fb6eafb5c606 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataDataStreamsService.java @@ -155,6 +155,15 @@ static ClusterState updateDataLifecycle( Metadata.Builder builder = Metadata.builder(metadata); for (var dataStreamName : dataStreamNames) { var dataStream = validateDataStream(metadata, dataStreamName); + if (dataStream.isSystem()) { + if (lifecycle != null && lifecycle.getDownsamplingRounds() != null) { + throw new 
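The Metadata refactor above also extracts its multi-line assertion into assertContainsIndexIfDataStream, a method that always returns true so the call site can stay a one-line `assert assertContainsIndexIfDataStream(parent, indexMetadata);` whose expression is never evaluated when assertions are disabled. A generic sketch of the extracted-assertion idiom (names invented):

    // Always returns true; the real check lives in the nested assert, and the
    // whole call is skipped when the JVM runs without -ea.
    private static boolean assertParentContains(Set<String> parentNames, String childName) {
        assert parentNames == null || parentNames.contains(childName)
            : "expected parent to contain [" + childName + "]";
        return true;
    }

    // Call site stays a single line:
    assert assertParentContains(names, name);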
IllegalArgumentException( + "System data streams do not support downsampling as part of their lifecycle configuration. Encountered [" + + dataStream.getName() + + "] in the request" + ); + } + } builder.put( new DataStream( dataStream.getName(), diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java index a5207adb20b01..08b0d56da782a 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.NamedDiff; @@ -41,7 +42,7 @@ */ public class NodesShutdownMetadata implements Metadata.Custom { public static final String TYPE = "node_shutdown"; - public static final TransportVersion NODE_SHUTDOWN_VERSION = TransportVersion.V_7_13_0; + public static final TransportVersion NODE_SHUTDOWN_VERSION = TransportVersions.V_7_13_0; public static final NodesShutdownMetadata EMPTY = new NodesShutdownMetadata(Map.of()); private static final ParseField NODES_FIELD = new ParseField("nodes"); @@ -79,7 +80,7 @@ public NodesShutdownMetadata(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(nodes, StreamOutput::writeString, (outStream, v) -> v.writeTo(outStream)); + out.writeMap(nodes, StreamOutput::writeWriteable); } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetadata.java index a53aa027fc7a6..9b07fbadb2328 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetadata.java @@ -10,6 +10,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NamedDiff; @@ -174,11 +175,11 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.MINIMUM_COMPATIBLE; + return TransportVersions.MINIMUM_COMPATIBLE; } public RepositoriesMetadata(StreamInput in) throws IOException { - this.repositories = in.readImmutableList(RepositoryMetadata::new); + this.repositories = in.readCollectionAsImmutableList(RepositoryMetadata::new); } public static NamedDiff readDiffFrom(StreamInput in) throws IOException { @@ -190,7 +191,7 @@ public static NamedDiff readDiffFrom(StreamInput in) throws IOException */ @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(repositories); + out.writeCollection(repositories); } public static RepositoriesMetadata fromXContent(XContentParser parser) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateErrorMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateErrorMetadata.java index 892a8b188adc3..92ff6b4a2c567 100644 --- 
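NodesShutdownMetadata above is a representative instance of the writeMap migration repeated throughout this change: the new overload assumes String keys, so the explicit key writer disappears and the value lambda collapses into a shared method reference. Side by side, using the shapes from these hunks:

    // Old form: key and value writers both spelled out at every call site.
    out.writeMap(nodes, StreamOutput::writeString, (o, v) -> v.writeTo(o));

    // New form: String keys implied; Writeable values via the common reference.
    out.writeMap(nodes, StreamOutput::writeWriteable);

The same substitution covers the (o, v) -> v.writeTo(o) lambdas in IndexTemplateMetadata and ClusterApplierRecordingService, and writeMap(stats, StreamOutput::writeVLong) later in this diff drops its key writer the same way.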
a/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateErrorMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateErrorMetadata.java @@ -51,7 +51,7 @@ public record ReservedStateErrorMetadata(Long version, ErrorKind errorKind, List public void writeTo(StreamOutput out) throws IOException { out.writeLong(version); out.writeString(errorKind.getKindValue()); - out.writeCollection(errors, StreamOutput::writeString); + out.writeStringCollection(errors); } /** @@ -62,7 +62,11 @@ public void writeTo(StreamOutput out) throws IOException { * @throws IOException */ public static ReservedStateErrorMetadata readFrom(StreamInput in) throws IOException { - return new ReservedStateErrorMetadata(in.readLong(), ErrorKind.of(in.readString()), in.readList(StreamInput::readString)); + return new ReservedStateErrorMetadata( + in.readLong(), + ErrorKind.of(in.readString()), + in.readCollectionAsList(StreamInput::readString) + ); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateHandlerMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateHandlerMetadata.java index b4e51c3fbdaa7..b78a17c11dfb8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateHandlerMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ReservedStateHandlerMetadata.java @@ -42,7 +42,7 @@ public record ReservedStateHandlerMetadata(String name, Set keys) @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); - out.writeCollection(keys, StreamOutput::writeString); + out.writeStringCollection(keys); } /** @@ -53,13 +53,13 @@ public void writeTo(StreamOutput out) throws IOException { * @throws IOException */ public static ReservedStateHandlerMetadata readFrom(StreamInput in) throws IOException { - return new ReservedStateHandlerMetadata(in.readString(), in.readSet(StreamInput::readString)); + return new ReservedStateHandlerMetadata(in.readString(), in.readCollectionAsSet(StreamInput::readString)); } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(name()); - builder.stringListField(KEYS.getPreferredName(), keys().stream().sorted().toList()); // ordered keys for output consistency + builder.array(KEYS.getPreferredName(), keys().stream().sorted().toArray(String[]::new)); // ordered keys for output consistency builder.endObject(); return builder; } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java index 7f7f8a87cbfb2..5597f3359a1c7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ShutdownShardMigrationStatus.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -22,7 +23,7 @@ import java.util.Objects; public class ShutdownShardMigrationStatus implements Writeable, ToXContentObject { - private static final TransportVersion ALLOCATION_DECISION_ADDED_VERSION = TransportVersion.V_7_16_0; + private static final TransportVersion 
ALLOCATION_DECISION_ADDED_VERSION = TransportVersions.V_7_16_0; public static final String NODE_ALLOCATION_DECISION_KEY = "node_allocation_decision"; diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java index 620d07b874acd..aaf256a49a0a5 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/SingleNodeShutdownMetadata.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -33,9 +34,9 @@ */ public class SingleNodeShutdownMetadata implements SimpleDiffable, ToXContentObject { - public static final TransportVersion REPLACE_SHUTDOWN_TYPE_ADDED_VERSION = TransportVersion.V_7_16_0; - public static final TransportVersion SIGTERM_ADDED_VERSION = TransportVersion.V_8_500_010; - public static final TransportVersion GRACE_PERIOD_ADDED_VERSION = TransportVersion.V_8_500_010; + public static final TransportVersion REPLACE_SHUTDOWN_TYPE_ADDED_VERSION = TransportVersions.V_7_16_0; + public static final TransportVersion SIGTERM_ADDED_VERSION = TransportVersions.V_8_500_020; + public static final TransportVersion GRACE_PERIOD_ADDED_VERSION = TransportVersions.V_8_500_020; public static final ParseField NODE_ID_FIELD = new ParseField("node_id"); public static final ParseField TYPE_FIELD = new ParseField("type"); @@ -467,6 +468,16 @@ public static Type parse(String type) { default -> throw new IllegalArgumentException("unknown shutdown type: " + type); }; } + + /** + * @return True if this shutdown type indicates that the node will be permanently removed from the cluster, false otherwise. 
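The isRemovalType() helper whose Javadoc starts here (its body follows in the next hunk) relies on an exhaustive switch expression: there is deliberately no default branch, so adding a new shutdown Type constant fails compilation until every such switch is revisited. The idiom, as it appears in the hunk below:

    // Exhaustive enum switch: omitting `default` makes the compiler reject this
    // method whenever a new Type constant is not handled explicitly.
    public boolean isRemovalType() {
        return switch (this) {
            case REMOVE, SIGTERM, REPLACE -> true;
            case RESTART -> false;
        };
    }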
+ */ + public boolean isRemovalType() { + return switch (this) { + case REMOVE, SIGTERM, REPLACE -> true; + case RESTART -> false; + }; + } } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java index acdfbde568e11..d36b70b49c6ab 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Template.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.Strings; @@ -123,7 +123,7 @@ public Template(StreamInput in) throws IOException { } if (in.getTransportVersion().onOrAfter(DataStreamLifecycle.ADDED_ENABLED_FLAG_VERSION)) { this.lifecycle = in.readOptionalWriteable(DataStreamLifecycle::new); - } else if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + } else if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { boolean isExplicitNull = in.readBoolean(); if (isExplicitNull) { this.lifecycle = DataStreamLifecycle.newBuilder().enabled(false).build(); @@ -173,11 +173,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } else { out.writeBoolean(true); - out.writeMap(this.aliases, StreamOutput::writeString, (stream, aliasMetadata) -> aliasMetadata.writeTo(stream)); + out.writeMap(this.aliases, StreamOutput::writeWriteable); } if (out.getTransportVersion().onOrAfter(DataStreamLifecycle.ADDED_ENABLED_FLAG_VERSION)) { out.writeOptionalWriteable(lifecycle); - } else if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + } else if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { boolean isExplicitNull = lifecycle != null && lifecycle.isEnabled() == false; out.writeBoolean(isExplicitNull); if (isExplicitNull == false) { diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java index dfecc0ce2b215..31cbe3b0763e1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNode.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.node; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; @@ -68,7 +69,7 @@ public static boolean isServerless() { } static final String COORDINATING_ONLY = "coordinating_only"; - public static final TransportVersion EXTERNAL_ID_VERSION = TransportVersion.V_8_3_0; + public static final TransportVersion EXTERNAL_ID_VERSION = TransportVersions.V_8_3_0; public static final Comparator DISCOVERY_NODE_COMPARATOR = Comparator.comparing(DiscoveryNode::getName) .thenComparing(DiscoveryNode::getId); @@ -344,7 +345,7 @@ public DiscoveryNode(StreamInput in) throws IOException { } } this.roles = Collections.unmodifiableSortedSet(roles); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_024)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_024)) { versionInfo = new VersionInformation(Version.readVersion(in), IndexVersion.readVersion(in), 
IndexVersion.readVersion(in)); } else { versionInfo = inferVersionInformation(Version.readVersion(in)); @@ -375,13 +376,13 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(hostName); out.writeString(hostAddress); address.writeTo(out); - out.writeMap(attributes, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(attributes, StreamOutput::writeString); out.writeCollection(roles, (o, role) -> { o.writeString(role.roleName()); o.writeString(role.roleNameAbbreviation()); o.writeBoolean(role.canContainData()); }); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_024)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_024)) { Version.writeVersion(versionInfo.nodeVersion(), out); IndexVersion.writeVersion(versionInfo.minIndexVersion(), out); IndexVersion.writeVersion(versionInfo.maxIndexVersion(), out); diff --git a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java index 6726d5adebc40..3ee28437ff81c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/node/DiscoveryNodes.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster.node; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SimpleDiffable; @@ -28,6 +28,7 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -38,6 +39,8 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.elasticsearch.cluster.routing.allocation.DataTier.ALL_DATA_TIERS; + /** * This class holds all {@link DiscoveryNode} in the cluster and provides convenience methods to * access, modify merge / diff discovery nodes. @@ -66,7 +69,7 @@ public class DiscoveryNodes implements Iterable, SimpleDiffable availableRoles; + private final Map> tiersToNodeIds; private DiscoveryNodes( long nodeLeftGeneration, @@ -81,7 +84,7 @@ private DiscoveryNodes( Version minNodeVersion, IndexVersion maxDataNodeCompatibleIndexVersion, IndexVersion minSupportedIndexVersion, - Set availableRoles + Map> tiersToNodeIds ) { this.nodeLeftGeneration = nodeLeftGeneration; this.nodes = nodes; @@ -99,7 +102,7 @@ private DiscoveryNodes( this.maxDataNodeCompatibleIndexVersion = maxDataNodeCompatibleIndexVersion; this.minSupportedIndexVersion = minSupportedIndexVersion; assert (localNodeId == null) == (localNode == null); - this.availableRoles = availableRoles; + this.tiersToNodeIds = tiersToNodeIds; } public DiscoveryNodes withMasterNodeId(@Nullable String masterNodeId) { @@ -117,7 +120,7 @@ public DiscoveryNodes withMasterNodeId(@Nullable String masterNodeId) { minNodeVersion, maxDataNodeCompatibleIndexVersion, minSupportedIndexVersion, - availableRoles + tiersToNodeIds ); } @@ -150,13 +153,12 @@ public boolean isLocalNodeElectedMaster() { } /** - * Checks if any node has the role with the given {@code roleName}. + * Gets a {@link Map} of node roles to node IDs which have those roles. * - * @param roleName name to check - * @return true if any node has the role of the given name + * @return {@link Map} of node roles to node IDs which have those roles. 
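DiscoveryNodes now precomputes a role-name-to-node-ids map (built by computeTiersToNodesMap a few hunks below) in place of the old flat set of available role names, so tier lookups return the matching node ids directly. The grouping is the standard computeIfAbsent pattern; a simplified sketch that ignores the PR's restriction to data tiers:

    // Group data nodes by role name; each role maps to the ids of nodes holding it.
    Map<String, Set<String>> byRole = new HashMap<>();
    for (DiscoveryNode node : dataNodes.values()) {
        for (DiscoveryNodeRole role : node.getRoles()) {
            byRole.computeIfAbsent(role.roleName(), k -> new HashSet<>()).add(node.getId());
        }
    }
    // Freeze the inner sets before handing the map to concurrent readers.
    byRole.replaceAll((k, v) -> Collections.unmodifiableSet(v));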
*/ - public boolean isRoleAvailable(String roleName) { - return availableRoles.contains(roleName); + public Map> getTiersToNodeIds() { + return tiersToNodeIds; } /** @@ -664,7 +666,7 @@ public String shortSummary() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(masterNodeId); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeVLong(nodeLeftGeneration); } // else nodeLeftGeneration is zero, or we're sending this to a remote cluster which does not care about the nodeLeftGeneration out.writeCollection(nodes.values()); @@ -679,7 +681,7 @@ public static DiscoveryNodes readFrom(StreamInput in, DiscoveryNode localNode) t builder.localNodeId(localNode.getId()); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { builder.nodeLeftGeneration(in.readVLong()); } // else nodeLeftGeneration is zero, or we're receiving this from a remote cluster so the nodeLeftGeneration does not matter to us @@ -720,6 +722,7 @@ public static class Builder { private final long oldNodeLeftGeneration; @Nullable // if not specified private Long nodeLeftGeneration; + private boolean resetNodeLeftGeneration; public Builder() { nodes = new HashMap<>(); @@ -855,7 +858,10 @@ public DiscoveryNodes build() { } else if (this.nodeLeftGeneration != null) { // only happens during deserialization assert removedNode == false; + assert resetNodeLeftGeneration == false; newNodeLeftGeneration = nodeLeftGeneration; + } else if (resetNodeLeftGeneration) { + newNodeLeftGeneration = 0L; } else if (removedNode) { newNodeLeftGeneration = oldNodeLeftGeneration + 1L; } else { @@ -876,14 +882,28 @@ public DiscoveryNodes build() { Objects.requireNonNullElse(minNodeVersion, Version.CURRENT.minimumCompatibilityVersion()), Objects.requireNonNullElse(maxDataNodeCompatibleIndexVersion, IndexVersion.current()), Objects.requireNonNullElse(minSupportedIndexVersion, IndexVersion.MINIMUM_COMPATIBLE), - dataNodes.values() - .stream() - .flatMap(n -> n.getRoles().stream()) - .map(DiscoveryNodeRole::roleName) - .collect(Collectors.toUnmodifiableSet()) + computeTiersToNodesMap(dataNodes) ); } + private static Map> computeTiersToNodesMap(final Map dataNodes) { + Map> tiersToNodes = new HashMap<>(ALL_DATA_TIERS.size() + 1); + for (var node : dataNodes.values()) { + if (node.hasRole(DiscoveryNodeRole.DATA_ROLE.roleName())) { + tiersToNodes.computeIfAbsent(DiscoveryNodeRole.DATA_ROLE.roleName(), (key) -> new HashSet<>()).add(node.getId()); + } + for (var role : ALL_DATA_TIERS) { + if (node.hasRole(role)) { + tiersToNodes.computeIfAbsent(role, (key) -> new HashSet<>()).add(node.getId()); + } + } + } + for (var entry : tiersToNodes.entrySet()) { + entry.setValue(Collections.unmodifiableSet(entry.getValue())); + } + return Collections.unmodifiableMap(tiersToNodes); + } + public boolean isLocalNodeElectedMaster() { return masterNodeId != null && masterNodeId.equals(localNodeId); } @@ -893,6 +913,11 @@ void nodeLeftGeneration(long nodeLeftGeneration) { assert this.nodeLeftGeneration == null : nodeLeftGeneration + " vs " + this.nodeLeftGeneration; this.nodeLeftGeneration = nodeLeftGeneration; } + + public void resetNodeLeftGeneration() { + assert this.resetNodeLeftGeneration == false; + this.resetNodeLeftGeneration = true; + } } private static Map filteredNodes(Map nodes, Predicate predicate) { diff --git 
a/server/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java index c719ce8f7dd27..d02a1f1e973cf 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/DelayedAllocationService.java @@ -21,6 +21,7 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; import org.elasticsearch.threadpool.Scheduler; @@ -96,7 +97,7 @@ public void onFailure(Exception e) { logger.warn("failed to submit schedule/execute reroute post unassigned shard", e); removeIfSameTask(DelayedRerouteTask.this); } - }, nextDelay, ThreadPool.Names.SAME); + }, nextDelay, EsExecutors.DIRECT_EXECUTOR_SERVICE); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java index 37ae9784d9cfa..b0c79102fcd80 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/ShardRouting.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.routing; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RecoverySource.ExistingStoreRecoverySource; import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource; @@ -38,8 +39,8 @@ public final class ShardRouting implements Writeable, ToXContentObject { * Used if shard size is not available */ public static final long UNAVAILABLE_EXPECTED_SHARD_SIZE = -1; - private static final TransportVersion EXPECTED_SHARD_SIZE_FOR_STARTED_VERSION = TransportVersion.V_8_5_0; - private static final TransportVersion RELOCATION_FAILURE_INFO_VERSION = TransportVersion.V_8_6_0; + private static final TransportVersion EXPECTED_SHARD_SIZE_FOR_STARTED_VERSION = TransportVersions.V_8_5_0; + private static final TransportVersion RELOCATION_FAILURE_INFO_VERSION = TransportVersions.V_8_6_0; private final ShardId shardId; private final String currentNodeId; @@ -352,7 +353,7 @@ public ShardRouting(ShardId shardId, StreamInput in) throws IOException { } else { expectedShardSize = UNAVAILABLE_EXPECTED_SHARD_SIZE; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { role = Role.readFrom(in); } else { role = Role.DEFAULT; @@ -389,7 +390,7 @@ public void writeToThin(StreamOutput out) throws IOException { out.writeLong(expectedShardSize); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { role.writeTo(out); } else if (role != Role.DEFAULT) { throw new IllegalStateException( diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java b/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java index 636e50ec5cbfb..59b344080c054 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/routing/UnassignedInfo.java @@ -10,6 +10,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; @@ -38,6 +39,8 @@ import java.util.Optional; import java.util.Set; +import static org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING; + /** * Holds additional information as to why the shard is in unassigned state. */ @@ -47,14 +50,17 @@ public final class UnassignedInfo implements ToXContentFragment, Writeable { * The version that the {@code lastAllocatedNode} field was added in. Used to adapt streaming of this class as appropriate for the * version of the node sending/receiving it. Should be removed once wire compatibility with this version is no longer necessary. */ - private static final TransportVersion VERSION_LAST_ALLOCATED_NODE_ADDED = TransportVersion.V_7_15_0; - private static final TransportVersion VERSION_UNPROMOTABLE_REPLICA_ADDED = TransportVersion.V_8_7_0; + private static final TransportVersion VERSION_LAST_ALLOCATED_NODE_ADDED = TransportVersions.V_7_15_0; + private static final TransportVersion VERSION_UNPROMOTABLE_REPLICA_ADDED = TransportVersions.V_8_7_0; public static final DateFormatter DATE_TIME_FORMATTER = DateFormatter.forPattern("date_optional_time").withZone(ZoneOffset.UTC); - public static final Setting INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = Setting.positiveTimeSetting( + public static final Setting INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = Setting.timeSetting( "index.unassigned.node_left.delayed_timeout", - TimeValue.timeValueMinutes(1), + settings -> EXISTING_SHARDS_ALLOCATOR_SETTING.get(settings).equals("stateless") + ? 
TimeValue.timeValueSeconds(10) + : TimeValue.timeValueMinutes(1), + TimeValue.timeValueMillis(0), Property.Dynamic, Property.IndexScope ); @@ -301,7 +307,7 @@ public UnassignedInfo(StreamInput in) throws IOException { this.failure = in.readException(); this.failedAllocations = in.readVInt(); this.lastAllocationStatus = AllocationStatus.readFrom(in); - this.failedNodeIds = in.readImmutableSet(StreamInput::readString); + this.failedNodeIds = in.readCollectionAsImmutableSet(StreamInput::readString); if (in.getTransportVersion().onOrAfter(VERSION_LAST_ALLOCATED_NODE_ADDED)) { this.lastAllocatedNodeId = in.readOptionalString(); } else { @@ -324,7 +330,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeException(failure); out.writeVInt(failedAllocations); lastAllocationStatus.writeTo(out); - out.writeCollection(failedNodeIds, StreamOutput::writeString); + out.writeStringCollection(failedNodeIds); if (out.getTransportVersion().onOrAfter(VERSION_LAST_ALLOCATED_NODE_ADDED)) { out.writeOptionalString(lastAllocatedNodeId); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AbstractAllocationDecision.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AbstractAllocationDecision.java index bbfdf856cf787..11596f9420709 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AbstractAllocationDecision.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AbstractAllocationDecision.java @@ -40,7 +40,7 @@ protected AbstractAllocationDecision(@Nullable DiscoveryNode targetNode, @Nullab protected AbstractAllocationDecision(StreamInput in) throws IOException { targetNode = in.readOptionalWriteable(DiscoveryNode::new); - nodeDecisions = in.readBoolean() ? in.readImmutableList(NodeAllocationResult::new) : null; + nodeDecisions = in.readBoolean() ? 
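The delayed-timeout hunk above swaps positiveTimeSetting, with its fixed one-minute default, for the timeSetting overload whose default is computed from other settings: ten seconds when the "stateless" shards allocator is configured, one minute otherwise, with an explicit zero floor standing in for the non-negativity check the old factory applied implicitly. The general shape of a setting with a dependent default (key name invented):

    // A TimeValue setting whose default depends on another setting's value.
    public static final Setting<TimeValue> EXAMPLE_TIMEOUT = Setting.timeSetting(
        "example.timeout",                                  // hypothetical key
        settings -> EXISTING_SHARDS_ALLOCATOR_SETTING.get(settings).equals("stateless")
            ? TimeValue.timeValueSeconds(10)
            : TimeValue.timeValueMinutes(1),
        TimeValue.timeValueMillis(0),                       // minimum accepted value
        Property.Dynamic,
        Property.IndexScope
    );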
in.readCollectionAsImmutableList(NodeAllocationResult::new) : null; } /** @@ -83,7 +83,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(targetNode); if (nodeDecisions != null) { out.writeBoolean(true); - out.writeList(nodeDecisions); + out.writeCollection(nodeDecisions); } else { out.writeBoolean(false); } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStats.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStats.java index 56a42cff10fbb..853a26263fe9f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStats.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/ClusterBalanceStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster.routing.allocation.allocator; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.Metadata; @@ -65,8 +65,8 @@ public static ClusterBalanceStats readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(tiers, StreamOutput::writeString, StreamOutput::writeWriteable); - out.writeMap(nodes, StreamOutput::writeString, StreamOutput::writeWriteable); + out.writeMap(tiers, StreamOutput::writeWriteable); + out.writeMap(nodes, StreamOutput::writeWriteable); } @Override @@ -210,8 +210,8 @@ private static NodeBalanceStats createFrom( public static NodeBalanceStats readFrom(StreamInput in) throws IOException { return new NodeBalanceStats( - in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0) ? in.readString() : UNKNOWN_NODE_ID, - in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0) ? in.readStringList() : List.of(), + in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0) ? in.readString() : UNKNOWN_NODE_ID, + in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0) ? 
in.readStringCollectionAsList() : List.of(), in.readInt(), in.readDouble(), in.readLong(), @@ -221,10 +221,10 @@ public static NodeBalanceStats readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeString(nodeId); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeStringCollection(roles); } out.writeInt(shards); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java index d98979c6d0611..279c774127e04 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconciler.java @@ -18,6 +18,7 @@ import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider; @@ -31,11 +32,9 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.threadpool.ThreadPool; -import java.util.Collections; import java.util.Comparator; -import java.util.List; +import java.util.Iterator; import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BiFunction; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -179,7 +178,7 @@ private void failAllocationOfNewPrimaries(RoutingAllocation allocation) { while (unassignedIterator.hasNext()) { final ShardRouting shardRouting = unassignedIterator.next(); final UnassignedInfo unassignedInfo = shardRouting.unassignedInfo(); - if (shardRouting.primary() && unassignedInfo.getLastAllocationStatus() == UnassignedInfo.AllocationStatus.NO_ATTEMPT) { + if (shardRouting.primary() && unassignedInfo.getLastAllocationStatus() == AllocationStatus.NO_ATTEMPT) { unassignedIterator.updateUnassigned( new UnassignedInfo( unassignedInfo.getReason(), @@ -189,7 +188,7 @@ private void failAllocationOfNewPrimaries(RoutingAllocation allocation) { unassignedInfo.getUnassignedTimeInNanos(), unassignedInfo.getUnassignedTimeInMillis(), unassignedInfo.isDelayed(), - UnassignedInfo.AllocationStatus.DECIDERS_NO, + AllocationStatus.DECIDERS_NO, unassignedInfo.getFailedNodeIds(), unassignedInfo.getLastAllocatedNodeId() ), @@ -249,69 +248,60 @@ private void allocateUnassigned() { final var shard = primary[i]; final var assignment = desiredBalance.getAssignment(shard.shardId()); final boolean ignored = assignment == null || isIgnored(routingNodes, shard, assignment); - final var isThrottled = new AtomicBoolean(false); - if (ignored == false) { - for (final var nodeIdIterator : List.of( - getDesiredNodesIds(shard, assignment), - getFallbackNodeIds(shard, isThrottled) - )) { - for (final var desiredNodeId : nodeIdIterator) { - final var routingNode = routingNodes.node(desiredNodeId); - if (routingNode == null) { 
- // desired node no longer exists - continue; - } - final var decision = allocation.deciders().canAllocate(shard, routingNode, allocation); - switch (decision.type()) { - case YES -> { - logger.debug("Assigning shard [{}] to [{}]", shard, desiredNodeId); - final long shardSize = DiskThresholdDecider.getExpectedShardSize( - shard, - ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, - allocation.clusterInfo(), - allocation.snapshotShardSizeInfo(), - allocation.metadata(), - allocation.routingTable() - ); - routingNodes.initializeShard(shard, desiredNodeId, null, shardSize, allocation.changes()); - allocationOrdering.recordAllocation(desiredNodeId); - if (shard.primary() == false) { - // copy over the same replica shards to the secondary array so they will get allocated - // in a subsequent iteration, allowing replicas of other shards to be allocated first - while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) { - secondary[secondaryLength++] = primary[++i]; - } + AllocationStatus unallocatedStatus; + if (ignored) { + unallocatedStatus = AllocationStatus.NO_ATTEMPT; + } else { + unallocatedStatus = AllocationStatus.DECIDERS_NO; + final var nodeIdsIterator = new NodeIdsIterator(shard, assignment); + while (nodeIdsIterator.hasNext()) { + final var nodeId = nodeIdsIterator.next(); + final var routingNode = routingNodes.node(nodeId); + if (routingNode == null) { + // desired node no longer exists + continue; + } + final var decision = allocation.deciders().canAllocate(shard, routingNode, allocation); + switch (decision.type()) { + case YES -> { + logger.debug("Assigning shard [{}] to {} [{}]", shard, nodeIdsIterator.source, nodeId); + final long shardSize = DiskThresholdDecider.getExpectedShardSize( + shard, + ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, + allocation.clusterInfo(), + allocation.snapshotShardSizeInfo(), + allocation.metadata(), + allocation.routingTable() + ); + routingNodes.initializeShard(shard, nodeId, null, shardSize, allocation.changes()); + allocationOrdering.recordAllocation(nodeId); + if (shard.primary() == false) { + // copy over the same replica shards to the secondary array so they will get allocated + // in a subsequent iteration, allowing replicas of other shards to be allocated first + while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) { + secondary[secondaryLength++] = primary[++i]; } - continue nextShard; - } - case THROTTLE -> { - isThrottled.set(true); - logger.trace("Couldn't assign shard [{}] to [{}]: {}", shard.shardId(), desiredNodeId, decision); - } - case NO -> { - logger.trace("Couldn't assign shard [{}] to [{}]: {}", shard.shardId(), desiredNodeId, decision); } + continue nextShard; + } + case THROTTLE -> { + nodeIdsIterator.wasThrottled = true; + unallocatedStatus = AllocationStatus.DECIDERS_THROTTLED; + logger.trace("Couldn't assign shard [{}] to [{}]: {}", shard.shardId(), nodeId, decision); + } + case NO -> { + logger.trace("Couldn't assign shard [{}] to [{}]: {}", shard.shardId(), nodeId, decision); } } } } - logger.debug("No eligible node found to assign shard [{}] amongst [{}]", shard, assignment); - - final UnassignedInfo.AllocationStatus allocationStatus; - if (ignored) { - allocationStatus = UnassignedInfo.AllocationStatus.NO_ATTEMPT; - } else if (isThrottled.get()) { - allocationStatus = UnassignedInfo.AllocationStatus.DECIDERS_THROTTLED; - } else { - allocationStatus = UnassignedInfo.AllocationStatus.DECIDERS_NO; - } - - unassigned.ignoreShard(shard, allocationStatus, 
allocation.changes()); + logger.debug("No eligible node found to assign shard [{}]", shard); + unassigned.ignoreShard(shard, unallocatedStatus, allocation.changes()); if (shard.primary() == false) { // we could not allocate it and we are a replica - check if we can ignore the other replicas while (i < primaryLength - 1 && comparator.compare(primary[i], primary[i + 1]) == 0) { - unassigned.ignoreShard(primary[++i], allocationStatus, allocation.changes()); + unassigned.ignoreShard(primary[++i], unallocatedStatus, allocation.changes()); } } } @@ -323,23 +313,57 @@ private void allocateUnassigned() { } while (primaryLength > 0); } - private Iterable getDesiredNodesIds(ShardRouting shard, ShardAssignment assignment) { - return allocationOrdering.sort(allocation.deciders().getForcedInitialShardAllocationToNodes(shard, allocation).map(forced -> { - logger.debug("Shard [{}] assignment is ignored. Initial allocation forced to {}", shard.shardId(), forced); - return forced; - }).orElse(assignment.nodeIds())); - } + private final class NodeIdsIterator implements Iterator { + + private final ShardRouting shard; - private Iterable getFallbackNodeIds(ShardRouting shard, AtomicBoolean isThrottled) { - return () -> { - if (shard.primary() && isThrottled.get() == false) { + /** + * Contains the source of the nodeIds used for shard assignment. It could be: + * * desired - when using desired nodes + * * forced initial allocation - when initial allocation is forced to certain nodes by shrink/split/clone index operation + * * fallback - when assigning the primary shard is temporarily not possible on desired nodes, + * and it is assigned elsewhere in the cluster + */ + private NodeIdSource source; + private Iterator nodeIds; + + private boolean wasThrottled = false; + + NodeIdsIterator(ShardRouting shard, ShardAssignment assignment) { + this.shard = shard; + + var forcedInitialAllocation = allocation.deciders().getForcedInitialShardAllocationToNodes(shard, allocation); + if (forcedInitialAllocation.isPresent()) { + logger.debug("Shard [{}] initial allocation is forced to {}", shard.shardId(), forcedInitialAllocation.get()); + nodeIds = allocationOrdering.sort(forcedInitialAllocation.get()).iterator(); + source = NodeIdSource.FORCED_INITIAL_ALLOCATION; + } else { + nodeIds = allocationOrdering.sort(assignment.nodeIds()).iterator(); + source = NodeIdSource.DESIRED; + } + } + + @Override + public boolean hasNext() { + if (nodeIds.hasNext() == false && source == NodeIdSource.DESIRED && shard.primary() && wasThrottled == false) { var fallbackNodeIds = allocation.routingNodes().getAllNodeIds(); logger.debug("Shard [{}] assignment is temporarily not possible. 
Falling back to {}", shard.shardId(), fallbackNodeIds); - return allocationOrdering.sort(fallbackNodeIds).iterator(); - } else { - return Collections.emptyIterator(); + nodeIds = allocationOrdering.sort(fallbackNodeIds).iterator(); + source = NodeIdSource.FALLBACK; } - }; + return nodeIds.hasNext(); + } + + @Override + public String next() { + return nodeIds.next(); + } + } + + private enum NodeIdSource { + DESIRED, + FORCED_INITIAL_ALLOCATION, + FALLBACK; } private boolean isIgnored(RoutingNodes routingNodes, ShardRouting shard, ShardAssignment assignment) { diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java index b465b1fcbc619..c017d77362427 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceStats.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.routing.allocation.allocator; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -30,7 +31,7 @@ public record DesiredBalanceStats( long cumulativeReconciliationTime ) implements Writeable, ToXContentObject { - private static final TransportVersion COMPUTED_SHARD_MOVEMENTS_VERSION = TransportVersion.V_8_8_0; + private static final TransportVersion COMPUTED_SHARD_MOVEMENTS_VERSION = TransportVersions.V_8_8_0; public static DesiredBalanceStats readFrom(StreamInput in) throws IOException { return new DesiredBalanceStats( diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java index 51a5c172c6a2b..208be8adbe18f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/command/AllocationCommands.java @@ -99,7 +99,7 @@ public static AllocationCommands readFrom(StreamInput in) throws IOException { * @throws IOException if something happens during write */ public static void writeTo(AllocationCommands commands, StreamOutput out) throws IOException { - out.writeNamedWriteableList(commands.commands); + out.writeNamedWriteableCollection(commands.commands); } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierRecordingService.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierRecordingService.java index 46354f892955c..b1a2726e468e9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierRecordingService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierRecordingService.java @@ -125,7 +125,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(recordings, StreamOutput::writeString, (out1, value) -> value.writeTo(out1)); + out.writeMap(recordings, StreamOutput::writeWriteable); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java 
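The NodeIdsIterator introduced above replaces the old nested loop over two candidate lists: it serves the desired (or forced-initial-allocation) node ids first and, only for primaries that were never throttled, lazily widens to every node in the cluster once the preferred ids run out. Stripped of the allocator details, the control flow is a one-shot fallback iterator; a sketch with invented names, not the PR's code:

    import java.util.Iterator;
    import java.util.function.Supplier;

    // Serves a preferred source first, then switches exactly once to a fallback.
    final class OneShotFallbackIterator<T> implements Iterator<T> {
        private Iterator<T> current;
        private Supplier<Iterator<T>> fallback; // nulled once consumed

        OneShotFallbackIterator(Iterator<T> preferred, Supplier<Iterator<T>> fallback) {
            this.current = preferred;
            this.fallback = fallback;
        }

        @Override
        public boolean hasNext() {
            if (current.hasNext() == false && fallback != null) {
                current = fallback.get(); // widen the candidate set, one time only
                fallback = null;
            }
            return current.hasNext();
        }

        @Override
        public T next() {
            return current.next();
        }
    }

Keeping the throttle state on the iterator itself (wasThrottled, source) lets the caller derive the final AllocationStatus without the AtomicBoolean the old code threaded through its lambdas.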
b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java index 332910959a459..4230838a97592 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterApplierService.java @@ -267,7 +267,7 @@ public void run() { return; } if (timeout != null) { - notifyTimeout.cancellable = threadPool.schedule(notifyTimeout, timeout, ThreadPool.Names.GENERIC); + notifyTimeout.cancellable = threadPool.schedule(notifyTimeout, timeout, threadPool.generic()); } listener.postAdded(); } diff --git a/server/src/main/java/org/elasticsearch/cluster/service/ClusterStateUpdateStats.java b/server/src/main/java/org/elasticsearch/cluster/service/ClusterStateUpdateStats.java index 2efafb228bb54..a21d182ae32f8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/ClusterStateUpdateStats.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/ClusterStateUpdateStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster.service; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -117,7 +117,7 @@ public ClusterStateUpdateStats(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - assert out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0) : out.getTransportVersion(); + assert out.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0) : out.getTransportVersion(); out.writeVLong(unchangedTaskCount); out.writeVLong(publicationSuccessCount); out.writeVLong(publicationFailureCount); diff --git a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java index ad3aeacaeb583..f037f4780b28d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/MasterService.java @@ -714,7 +714,7 @@ public void onCommit(TimeValue commitTime) { } else if (countDown.countDown()) { finish(); } else { - this.ackTimeoutCallback = threadPool.schedule(this::onTimeout, timeLeft, ThreadPool.Names.GENERIC); + this.ackTimeoutCallback = threadPool.schedule(this::onTimeout, timeLeft, threadPool.generic()); // re-check if onNodeAck has not completed while we were scheduling the timeout if (countDown.isCountedDown()) { ackTimeoutCallback.cancel(); @@ -1525,7 +1525,7 @@ public void submitTask(String source, T task, @Nullable TimeValue timeout) { timeoutCancellable = threadPool.schedule( new TaskTimeoutHandler<>(timeout, source, taskHolder), timeout, - ThreadPool.Names.GENERIC + threadPool.generic() ); } else { timeoutCancellable = null; diff --git a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java b/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java index d22c22a22be10..711f0c84136e7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java +++ b/server/src/main/java/org/elasticsearch/cluster/service/TransportVersionsFixupListener.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.ClusterStateTaskExecutor; import org.elasticsearch.cluster.ClusterStateTaskListener; +import 
org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Priority; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.SuppressForbidden; @@ -34,7 +35,9 @@ import java.util.HashSet; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; +import java.util.concurrent.Executor; import java.util.stream.Collectors; import static org.elasticsearch.cluster.ClusterState.INFERRED_TRANSPORT_VERSION; @@ -53,22 +56,30 @@ public class TransportVersionsFixupListener implements ClusterStateListener { private final MasterServiceTaskQueue taskQueue; private final ClusterAdminClient client; private final Scheduler scheduler; + private final Executor executor; private final Set pendingNodes = Collections.synchronizedSet(new HashSet<>()); - public TransportVersionsFixupListener(ClusterService service, ClusterAdminClient client, Scheduler scheduler) { + public TransportVersionsFixupListener(ClusterService service, ClusterAdminClient client, ThreadPool threadPool) { // there tends to be a lot of state operations on an upgrade - this one is not time-critical, // so use LOW priority. It just needs to be run at some point after upgrade. - this(service.createTaskQueue("fixup-transport-versions", Priority.LOW, new TransportVersionUpdater()), client, scheduler); + this( + service.createTaskQueue("fixup-transport-versions", Priority.LOW, new TransportVersionUpdater()), + client, + threadPool, + threadPool.executor(ThreadPool.Names.CLUSTER_COORDINATION) + ); } TransportVersionsFixupListener( MasterServiceTaskQueue taskQueue, ClusterAdminClient client, - Scheduler scheduler + Scheduler scheduler, + Executor executor ) { this.taskQueue = taskQueue; this.client = client; this.scheduler = scheduler; + this.executor = executor; } class NodeTransportVersionTask implements ClusterStateTaskListener { @@ -99,7 +110,10 @@ public ClusterState execute(BatchExecutionContext cont for (var c : context.taskContexts()) { for (var e : c.getTask().results().entrySet()) { // this node's transport version might have been updated already/node has gone away - TransportVersion recordedTv = builder.transportVersions().get(e.getKey()); + var cvMap = builder.compatibilityVersions(); + TransportVersion recordedTv = Optional.ofNullable(cvMap.get(e.getKey())) + .map(CompatibilityVersions::transportVersion) + .orElse(null); assert (recordedTv != null) || (context.initialState().nodes().nodeExists(e.getKey()) == false) : "Node " + e.getKey() + " is in the cluster but does not have an associated transport version recorded"; if (Objects.equals(recordedTv, INFERRED_TRANSPORT_VERSION)) { @@ -113,9 +127,9 @@ public ClusterState execute(BatchExecutionContext cont } } - @SuppressForbidden(reason = "maintaining ClusterState#transportVersions requires reading them") - private static Map getTransportVersions(ClusterState clusterState) { - return clusterState.transportVersions(); + @SuppressForbidden(reason = "maintaining ClusterState#compatibilityVersions requires reading them") + private static Map getCompatibilityVersions(ClusterState clusterState) { + return clusterState.compatibilityVersions(); } @Override @@ -129,9 +143,9 @@ public void clusterChanged(ClusterChangedEvent event) { && event.state().getMinTransportVersion().equals(INFERRED_TRANSPORT_VERSION)) { // find all the relevant nodes - Set nodes = getTransportVersions(event.state()).entrySet() + Set nodes = getCompatibilityVersions(event.state()).entrySet() .stream() - .filter(e -> 
e.getValue().equals(INFERRED_TRANSPORT_VERSION)) + .filter(e -> e.getValue().transportVersion().equals(INFERRED_TRANSPORT_VERSION)) .map(Map.Entry::getKey) .collect(Collectors.toSet()); @@ -142,7 +156,7 @@ public void clusterChanged(ClusterChangedEvent event) { private void scheduleRetry(Set nodes, int thisRetryNum) { // just keep retrying until this succeeds logger.debug("Scheduling retry {} for nodes {}", thisRetryNum + 1, nodes); - scheduler.schedule(() -> updateTransportVersions(nodes, thisRetryNum + 1), RETRY_TIME, ThreadPool.Names.CLUSTER_COORDINATION); + scheduler.schedule(() -> updateTransportVersions(nodes, thisRetryNum + 1), RETRY_TIME, executor); } private void updateTransportVersions(Set nodes, int retryNum) { diff --git a/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java b/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java new file mode 100644 index 0000000000000..5e7692e645d6a --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersions.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.version; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.XContentBuilder; + +import java.io.IOException; +import java.util.Comparator; +import java.util.Map; + +/** + * Wraps component version numbers for cluster state + * + *
<p>
Cluster state will need to carry version information for different independently versioned components. + * This wrapper lets us wrap these versions one level below {@link org.elasticsearch.cluster.ClusterState}. + * It's similar to {@link org.elasticsearch.cluster.node.VersionInformation}, but this class is meant to + * be constructed during node startup and hold values from plugins as well. + * + * @param transportVersion A transport version, usually a minimum compatible one for a node. + */ +public record CompatibilityVersions(TransportVersion transportVersion) implements Writeable, ToXContentFragment { + + /** + * Constructs a VersionWrapper collecting all the minimum versions from the values of the map. + * + * @param compatibilityVersions A map of strings (typically node identifiers) and versions wrappers + * @return Minimum versions for the cluster + */ + public static CompatibilityVersions minimumVersions(Map compatibilityVersions) { + return new CompatibilityVersions( + compatibilityVersions.values() + .stream() + .map(CompatibilityVersions::transportVersion) + .min(Comparator.naturalOrder()) + // In practice transportVersions is always nonempty (except in tests) but use a conservative default anyway: + .orElse(TransportVersions.MINIMUM_COMPATIBLE) + ); + } + + public static CompatibilityVersions readVersion(StreamInput in) throws IOException { + return new CompatibilityVersions(TransportVersion.readVersion(in)); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + TransportVersion.writeVersion(this.transportVersion(), out); + } + + /** + * Adds fields to the builder without starting an object. We expect this method to be called within an object that may + * already have a nodeId field. + * @param builder The builder for the XContent + * @param params Ignored here. + * @return The builder with fields for versions added + * @throws IOException if the builder can't accept what we try to add + */ + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field("transport_version", this.transportVersion().toString()); + return builder; + } +} diff --git a/server/src/main/java/org/elasticsearch/common/FieldMemoryStats.java b/server/src/main/java/org/elasticsearch/common/FieldMemoryStats.java index 1785ca96c4ab3..8c2b328ba1333 100644 --- a/server/src/main/java/org/elasticsearch/common/FieldMemoryStats.java +++ b/server/src/main/java/org/elasticsearch/common/FieldMemoryStats.java @@ -53,7 +53,7 @@ public void add(FieldMemoryStats fieldMemoryStats) { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(stats, StreamOutput::writeString, StreamOutput::writeVLong); + out.writeMap(stats, StreamOutput::writeVLong); } /** diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java b/server/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java index 24e7c43533d62..d66b8b970437e 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/BlobStore.java @@ -8,7 +8,9 @@ package org.elasticsearch.common.blobstore; import java.io.Closeable; +import java.io.IOException; import java.util.Collections; +import java.util.Iterator; import java.util.Map; /** @@ -21,6 +23,12 @@ public interface BlobStore extends Closeable { */ BlobContainer blobContainer(BlobPath path); + /** + * Delete all the provided blobs from the blob store. 
Each blob could belong to a different {@code BlobContainer} + * @param blobNames the blobs to be deleted + */ + void deleteBlobsIgnoringIfNotExists(Iterator blobNames) throws IOException; + /** * Returns statistics on the count of operations that have been performed on this blob store */ diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java index 30a0565039015..838d0e3f4d08c 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java @@ -181,32 +181,7 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO @Override public void deleteBlobsIgnoringIfNotExists(Iterator blobNames) throws IOException { - IOException ioe = null; - long suppressedExceptions = 0; - while (blobNames.hasNext()) { - try { - Path resolve = path.resolve(blobNames.next()); - IOUtils.rm(resolve); - } catch (IOException e) { - // IOUtils.rm puts the original exception as a string in the IOException message. Ignore no such file exception. - if (e.getMessage().contains("NoSuchFileException") == false) { - // track up to 10 delete exceptions and try to continue deleting on exceptions - if (ioe == null) { - ioe = e; - } else if (ioe.getSuppressed().length < 10) { - ioe.addSuppressed(e); - } else { - ++suppressedExceptions; - } - } - } - } - if (ioe != null) { - if (suppressedExceptions > 0) { - ioe.addSuppressed(new IOException("Failed to delete files, suppressed [" + suppressedExceptions + "] failures")); - } - throw ioe; - } + blobStore.deleteBlobsIgnoringIfNotExists(Iterators.map(blobNames, blobName -> path.resolve(blobName).toString())); } @Override diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java index 784f691976726..77553ea21c5bf 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobStore.java @@ -12,10 +12,12 @@ import org.elasticsearch.common.blobstore.BlobContainer; import org.elasticsearch.common.blobstore.BlobPath; import org.elasticsearch.common.blobstore.BlobStore; +import org.elasticsearch.core.IOUtils; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; +import java.util.Iterator; import java.util.List; public class FsBlobStore implements BlobStore { @@ -61,6 +63,38 @@ public BlobContainer blobContainer(BlobPath path) { return new FsBlobContainer(this, path, f); } + @Override + public void deleteBlobsIgnoringIfNotExists(Iterator blobNames) throws IOException { + IOException ioe = null; + long suppressedExceptions = 0; + while (blobNames.hasNext()) { + try { + // FsBlobContainer uses this method to delete blobs; in that case each blob name is already an absolute path meaning that + // the resolution done here is effectively a non-op. + Path resolve = path.resolve(blobNames.next()); + IOUtils.rm(resolve); + } catch (IOException e) { + // IOUtils.rm puts the original exception as a string in the IOException message. Ignore no such file exception. 
+ if (e.getMessage().contains("NoSuchFileException") == false) { + // track up to 10 delete exceptions and try to continue deleting on exceptions + if (ioe == null) { + ioe = e; + } else if (ioe.getSuppressed().length < 10) { + ioe.addSuppressed(e); + } else { + ++suppressedExceptions; + } + } + } + } + if (ioe != null) { + if (suppressedExceptions > 0) { + ioe.addSuppressed(new IOException("Failed to delete files, suppressed [" + suppressedExceptions + "] failures")); + } + throw ioe; + } + } + @Override public void close() { // nothing to do here... diff --git a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java index 436b9580d47f6..ea6256a5b0717 100644 --- a/server/src/main/java/org/elasticsearch/common/collect/Iterators.java +++ b/server/src/main/java/org/elasticsearch/common/collect/Iterators.java @@ -14,8 +14,10 @@ import java.util.Iterator; import java.util.NoSuchElementException; import java.util.Objects; +import java.util.function.BiPredicate; import java.util.function.Function; import java.util.function.IntFunction; +import java.util.function.ToIntFunction; public class Iterators { @@ -228,4 +230,36 @@ public U next() { } } + public static boolean equals(Iterator iterator1, Iterator iterator2, BiPredicate itemComparer) { + if (iterator1 == null) { + return iterator2 == null; + } + if (iterator2 == null) { + return false; + } + + while (iterator1.hasNext()) { + if (iterator2.hasNext() == false) { + return false; + } + + if (itemComparer.test(iterator1.next(), iterator2.next()) == false) { + return false; + } + } + + return iterator2.hasNext() == false; + } + + public static int hashCode(Iterator iterator, ToIntFunction itemHashcode) { + if (iterator == null) { + return 0; + } + int result = 1; + while (iterator.hasNext()) { + result = 31 * result + itemHashcode.applyAsInt(iterator.next()); + } + return result; + } + } diff --git a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java index 28ca7e0ee3420..bda33e28fa315 100644 --- a/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java +++ b/server/src/main/java/org/elasticsearch/common/compress/CompressedXContent.java @@ -9,7 +9,7 @@ package org.elasticsearch.common.compress; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.hash.MessageDigests; @@ -17,6 +17,7 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Releasable; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.ToXContentObject; @@ -46,7 +47,7 @@ * memory. Note that the compressed string might still sometimes need to be * decompressed in order to perform equality checks or to compute hash codes. 
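An aside on the two Iterators helpers added above: they mirror the java.util.List contract, element-wise comparison for equals and 31-based accumulation for hashCode, so iterator-backed types can define equality without materialising a collection. A minimal sketch; the Names record is hypothetical and only the Iterators calls come from this change:

    import org.elasticsearch.common.collect.Iterators;

    import java.util.List;
    import java.util.Objects;

    // Hypothetical iterator-backed holder, for illustration only.
    record Names(List<String> values) {

        boolean sameValues(Names other) {
            // element-wise comparison; true only if both iterators are exhausted together
            return Iterators.equals(values.iterator(), other.values().iterator(), Objects::equals);
        }

        int valuesHash() {
            // 31-based accumulation, matching List#hashCode for the same elements
            return Iterators.hashCode(values.iterator(), Objects::hashCode);
        }
    }
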
*/ -public final class CompressedXContent { +public final class CompressedXContent implements Writeable { private static final ThreadLocal inflater = ThreadLocal.withInitial(InflaterAndBuffer::new); @@ -204,7 +205,7 @@ public String getSha256() { public static CompressedXContent readCompressedString(StreamInput in) throws IOException { final String sha256; final byte[] compressedData; - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { sha256 = in.readString(); compressedData = in.readByteArray(); } else { @@ -215,8 +216,9 @@ public static CompressedXContent readCompressedString(StreamInput in) throws IOE return new CompressedXContent(compressedData, sha256); } + @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { out.writeString(sha256); } else { int crc32 = crc32FromCompressed(bytes); diff --git a/server/src/main/java/org/elasticsearch/common/document/DocumentField.java b/server/src/main/java/org/elasticsearch/common/document/DocumentField.java index bae4c2255d7fc..2cbb14141b7fc 100644 --- a/server/src/main/java/org/elasticsearch/common/document/DocumentField.java +++ b/server/src/main/java/org/elasticsearch/common/document/DocumentField.java @@ -8,7 +8,7 @@ package org.elasticsearch.common.document; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -43,14 +43,14 @@ public class DocumentField implements Writeable, Iterable { public DocumentField(StreamInput in) throws IOException { name = in.readString(); - values = in.readList(StreamInput::readGenericValue); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { - ignoredValues = in.readList(StreamInput::readGenericValue); + values = in.readCollectionAsList(StreamInput::readGenericValue); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { + ignoredValues = in.readCollectionAsList(StreamInput::readGenericValue); } else { ignoredValues = Collections.emptyList(); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { - lookupFields = in.readList(LookupField::new); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) { + lookupFields = in.readCollectionAsList(LookupField::new); } else { lookupFields = List.of(); } @@ -114,11 +114,11 @@ public List getIgnoredValues() { public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeCollection(values, StreamOutput::writeGenericValue); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { out.writeCollection(ignoredValues, StreamOutput::writeGenericValue); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { - out.writeList(lookupFields); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) { + out.writeCollection(lookupFields); } else { if (lookupFields.isEmpty() == false) { assert false : "Lookup fields require all nodes be on 8.2 or later"; diff --git a/server/src/main/java/org/elasticsearch/common/geo/BoundingBox.java b/server/src/main/java/org/elasticsearch/common/geo/BoundingBox.java index c1c2007b111e5..30963641f3fc1 
100644 --- a/server/src/main/java/org/elasticsearch/common/geo/BoundingBox.java +++ b/server/src/main/java/org/elasticsearch/common/geo/BoundingBox.java @@ -8,7 +8,7 @@ package org.elasticsearch.common.geo; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.io.stream.GenericNamedWriteable; import org.elasticsearch.geometry.Geometry; import org.elasticsearch.geometry.Rectangle; import org.elasticsearch.geometry.ShapeType; @@ -27,7 +27,7 @@ * A class representing a Bounding-Box for use by Geo and Cartesian queries and aggregations * that deal with extents/rectangles representing rectangular areas of interest. */ -public abstract class BoundingBox implements ToXContentFragment, Writeable { +public abstract class BoundingBox implements ToXContentFragment, GenericNamedWriteable { static final ParseField TOP_RIGHT_FIELD = new ParseField("top_right"); static final ParseField BOTTOM_LEFT_FIELD = new ParseField("bottom_left"); static final ParseField TOP_FIELD = new ParseField("top"); diff --git a/server/src/main/java/org/elasticsearch/common/geo/GeoBoundingBox.java b/server/src/main/java/org/elasticsearch/common/geo/GeoBoundingBox.java index a4c9faa3852c0..11ba237a11145 100644 --- a/server/src/main/java/org/elasticsearch/common/geo/GeoBoundingBox.java +++ b/server/src/main/java/org/elasticsearch/common/geo/GeoBoundingBox.java @@ -8,6 +8,8 @@ package org.elasticsearch.common.geo; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ParseField; @@ -91,6 +93,16 @@ public void writeTo(StreamOutput out) throws IOException { out.writeGeoPoint(bottomRight); } + @Override + public final String getWriteableName() { + return "GeoBoundingBox"; + } + + @Override + public final TransportVersion getMinimalSupportedVersion() { + return TransportVersions.V_8_500_070; + } + protected static class GeoBoundsParser extends BoundsParser { GeoBoundsParser(XContentParser parser) { super(parser); diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/GenericNamedWriteable.java b/server/src/main/java/org/elasticsearch/common/io/stream/GenericNamedWriteable.java new file mode 100644 index 0000000000000..632d919c4b3a4 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/common/io/stream/GenericNamedWriteable.java @@ -0,0 +1,15 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.common.io.stream; + +/** + * Marker interface that allows specific NamedWritable objects to be serialized as part of the + * generic serialization in StreamOutput and StreamInput. 
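To make the marker concrete, here is a sketch of an implementor; ExamplePoint is invented for illustration, while the interface methods and the V_8_500_070 floor come from this change:

    import org.elasticsearch.TransportVersion;
    import org.elasticsearch.TransportVersions;
    import org.elasticsearch.common.io.stream.GenericNamedWriteable;
    import org.elasticsearch.common.io.stream.StreamOutput;

    import java.io.IOException;

    // Hypothetical type, for illustration only.
    public class ExamplePoint implements GenericNamedWriteable {
        private final double x, y;

        public ExamplePoint(double x, double y) {
            this.x = x;
            this.y = y;
        }

        @Override
        public String getWriteableName() {
            return "ExamplePoint"; // must match the name registered in the NamedWriteableRegistry
        }

        @Override
        public TransportVersion getMinimalSupportedVersion() {
            // writeGenericValue asserts at least V_8_500_070 for GenericNamedWriteable payloads
            return TransportVersions.V_8_500_070;
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeDouble(x);
            out.writeDouble(y);
        }
    }

With a matching registry entry (reader side elided here), out.writeGenericValue(new ExamplePoint(1, 2)) emits type byte 30 followed by the usual named-writeable frame, and readGenericValue dispatches back through readNamedWriteable(GenericNamedWriteable.class).
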
+ */ +public interface GenericNamedWriteable extends VersionedNamedWriteable {} diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java index fb4dac435cbcb..f81e13cff5f0e 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/NamedWriteableAwareStreamInput.java @@ -33,7 +33,7 @@ public C readNamedWriteable(Class categoryClass) t } @Override - public List readNamedWriteableList(Class categoryClass) throws IOException { + public List readNamedWriteableCollectionAsList(Class categoryClass) throws IOException { int count = readArraySize(); if (count == 0) { return Collections.emptyList(); diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java index 6b43e59177cde..96240dd053edb 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java @@ -13,6 +13,8 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -47,6 +49,7 @@ import java.util.Collections; import java.util.Date; import java.util.EnumSet; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; @@ -663,7 +666,7 @@ private Map readMap(Writeable.Reader keyReader, Writeable.Reader * @return Never {@code null}. */ public Map> readMapOfLists(final Writeable.Reader valueReader) throws IOException { - return readMap(i -> i.readList(valueReader)); + return readMap(i -> i.readCollectionAsList(valueReader)); } /** @@ -759,12 +762,12 @@ public Object readGenericValue() throws IOException { case 4 -> readDouble(); case 5 -> readBoolean(); case 6 -> readByteArray(); - case 7 -> readArrayList(); + case 7 -> readCollection(StreamInput::readGenericValue, ArrayList::new, Collections.emptyList()); case 8 -> readArray(); - case 9 -> getTransportVersion().onOrAfter(TransportVersion.V_8_7_0) + case 9 -> getTransportVersion().onOrAfter(TransportVersions.V_8_7_0) ? readOrderedMap(StreamInput::readGenericValue, StreamInput::readGenericValue) : readOrderedMap(StreamInput::readString, StreamInput::readGenericValue); - case 10 -> getTransportVersion().onOrAfter(TransportVersion.V_8_7_0) + case 10 -> getTransportVersion().onOrAfter(TransportVersions.V_8_7_0) ? readMap(StreamInput::readGenericValue, StreamInput::readGenericValue) : readMap(StreamInput::readGenericValue); case 11 -> readByte(); @@ -789,6 +792,7 @@ public Object readGenericValue() throws IOException { case 27 -> readOffsetTime(); case 28 -> readDuration(); case 29 -> readPeriod(); + case 30 -> readNamedWriteable(GenericNamedWriteable.class); default -> throw new IOException("Can't read unknown type [" + type + "]"); }; } @@ -810,18 +814,6 @@ public final Instant readOptionalInstant() throws IOException { return present ? 
readInstant() : null; } - private List readArrayList() throws IOException { - int size = readArraySize(); - if (size == 0) { - return Collections.emptyList(); - } - List list = new ArrayList<>(size); - for (int i = 0; i < size; i++) { - list.add(readGenericValue()); - } - return list; - } - private ZonedDateTime readZonedDateTime() throws IOException { final String timeZoneId = readString(); return ZonedDateTime.ofInstant(Instant.ofEpochMilli(readLong()), ZoneId.of(timeZoneId)); @@ -1077,24 +1069,17 @@ public C readOptionalNamedWriteable(Class category } /** - * Reads a list of objects. The list is expected to have been written using {@link StreamOutput#writeList(List)}. - * If the returned list contains any entries it will be mutable. If it is empty it might be immutable. - * - * @return the list of objects - * @throws IOException if an I/O exception occurs reading the list + * Reads a list of objects which was written using {@link StreamOutput#writeCollection}. If the returned list contains any entries it + * will be a (mutable) {@link ArrayList}. If it is empty it might be immutable. */ - public List readList(final Writeable.Reader reader) throws IOException { + public List readCollectionAsList(final Writeable.Reader reader) throws IOException { return readCollection(reader, ArrayList::new, Collections.emptyList()); } /** - * Reads an list of objects. The list is expected to have been written using {@link StreamOutput#writeList(List)}. - * The returned list is immutable. - * - * @return the list of objects - * @throws IOException if an I/O exception occurs reading the list + * Reads a list of objects which was written using {@link StreamOutput#writeCollection}. The returned list is immutable. */ - public List readImmutableList(final Writeable.Reader reader) throws IOException { + public List readCollectionAsImmutableList(final Writeable.Reader reader) throws IOException { int count = readArraySize(); // special cases small arrays, just like in java.util.List.of(...) return switch (count) { @@ -1114,63 +1099,53 @@ public List readImmutableList(final Writeable.Reader reader) throws IO } /** - * Same as {@link #readStringList()} but always returns an immutable list. - * - * @return immutable list of strings - * @throws IOException on failure + * Reads a list of strings which was written using {@link StreamOutput#writeStringCollection}. The returned list is immutable. */ - public List readImmutableStringList() throws IOException { - return readImmutableList(StreamInput::readString); + public List readStringCollectionAsImmutableList() throws IOException { + return readCollectionAsImmutableList(StreamInput::readString); } /** - * Reads a list of strings. The list is expected to have been written using {@link StreamOutput#writeStringCollection(Collection)}. - * If the returned list contains any entries it will be mutable. If it is empty it might be immutable. - * - * @return the list of strings - * @throws IOException if an I/O exception occurs reading the list + * Reads a list of strings which was written using {@link StreamOutput#writeStringCollection}. If the returned list contains any entries + * it will be a (mutable) {@link ArrayList}. If it is empty it might be immutable. */ - public List readStringList() throws IOException { - return readList(StreamInput::readString); + public List readStringCollectionAsList() throws IOException { + return readCollectionAsList(StreamInput::readString); } /** - * Reads an optional list. 
The list is expected to have been written using - * {@link StreamOutput#writeOptionalCollection(Collection)}. If the returned list contains any entries it will be mutable. - * If it is empty it might be immutable. + * Reads a possibly-{@code null} list which was written using {@link StreamOutput#writeOptionalCollection}. If the returned list + * contains any entries it will be a (mutable) {@link ArrayList}. If it is empty it might be immutable. */ - public List readOptionalList(final Writeable.Reader reader) throws IOException { + @Nullable + public List readOptionalCollectionAsList(final Writeable.Reader reader) throws IOException { final boolean isPresent = readBoolean(); - return isPresent ? readList(reader) : null; + return isPresent ? readCollectionAsList(reader) : null; } /** - * Reads an optional list of strings. The list is expected to have been written using - * {@link StreamOutput#writeOptionalStringCollection(Collection)}. If the returned list contains any entries it will be mutable. - * If it is empty it might be immutable. - * - * @return the list of strings - * @throws IOException if an I/O exception occurs reading the list + * Reads a possibly-{@code null} list of strings which was written using {@link StreamOutput#writeOptionalStringCollection}. If the + * returned list contains any entries it will be a (mutable) {@link ArrayList}. If it is empty it might be immutable. */ - public List readOptionalStringList() throws IOException { - return readOptionalList(StreamInput::readString); + @Nullable + public List readOptionalStringCollectionAsList() throws IOException { + return readOptionalCollectionAsList(StreamInput::readString); } /** - * Reads a set of objects. If the returned set contains any entries it will be mutable. If it is empty it might be immutable. + * Reads a set of objects which was written using {@link StreamOutput#writeCollection}. If the returned set contains any entries it + * will be a (mutable) {@link HashSet}. If it is empty it might be immutable. The collection that was originally written should also have + * been a set. */ - public Set readSet(Writeable.Reader reader) throws IOException { + public Set readCollectionAsSet(Writeable.Reader reader) throws IOException { return readCollection(reader, Sets::newHashSetWithExpectedSize, Collections.emptySet()); } /** - * Reads a set of objects. The set is expected to have been written using {@link StreamOutput#writeCollection(Collection)}} with - * a collection that contains no duplicates. The returned set is immutable. - * - * @return the set of objects - * @throws IOException if an I/O exception occurs reading the set + * Reads a set of objects which was written using {@link StreamOutput#writeCollection}. The returned set is immutable. The collection + * that was originally written should also have been a set. + */ - public Set readImmutableSet(final Writeable.Reader reader) throws IOException { + public Set readCollectionAsImmutableSet(final Writeable.Reader reader) throws IOException { int count = readArraySize(); // special cases small arrays, just like in java.util.Set.of(...) return switch (count) { @@ -1190,7 +1165,33 @@ public Set readImmutableSet(final Writeable.Reader reader) throws IOEx } /** - * Reads a collection of objects + * Reads a list of {@link NamedWriteable}s which was written using {@link StreamOutput#writeNamedWriteableCollection}. If the returned + * list contains any entries it will be a (mutable) {@link ArrayList}. If it is empty it might be immutable.
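A short round-trip sketch of the renamed set readers above, assuming an in-memory BytesStreamOutput; per the javadoc, the written collection should itself have been a set (run with -ea for the asserts):

    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    import java.io.IOException;
    import java.util.Set;

    public class SetWireRoundTrip {
        public static void main(String[] args) throws IOException {
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                out.writeCollection(Set.of("a", "b"), StreamOutput::writeString);
                out.writeCollection(Set.of(1, 2, 3), (o, v) -> o.writeVInt(v));

                StreamInput in = out.bytes().streamInput();
                // read each collection back as the same shape it was written with
                Set<String> mutable = in.readCollectionAsSet(StreamInput::readString);          // HashSet when non-empty
                Set<Integer> frozen = in.readCollectionAsImmutableSet(StreamInput::readVInt);   // immutable
                assert mutable.equals(Set.of("a", "b")) && frozen.equals(Set.of(1, 2, 3));
            }
        }
    }
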
+ */ + public List readNamedWriteableCollectionAsList(Class categoryClass) throws IOException { + throw new UnsupportedOperationException("can't read named writeable from StreamInput"); + } + + /** + * Reads a collection which was written using {@link StreamOutput#writeCollection}, accumulating the results using the provided + * consumer. + */ + public C readCollection(IntFunction constructor, CheckedBiConsumer itemConsumer) + throws IOException { + int count = readArraySize(); + var result = constructor.apply(count); + for (int i = 0; i < count; i++) { + itemConsumer.accept(this, result); + } + return result; + } + + /** + * Reads a collection, comprising a call to {@link #readVInt} for the size, followed by that many invocations of {@code reader}. + * + * @param reader reads each object in the collection + * @param constructor constructs the collection of the given (positive) size + * @param empty constructs an empty collection */ private > C readCollection(Writeable.Reader reader, IntFunction constructor, C empty) throws IOException { @@ -1202,17 +1203,11 @@ private > C readCollection(Writeable.Reader List readNamedWriteableList(Class categoryClass) throws IOException { - throw new UnsupportedOperationException("can't read named writeable from StreamInput"); - } - /** * Reads an enum with type E that was serialized based on the value of its ordinal */ diff --git a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java index f0456d8fbb04f..afe94da22d196 100644 --- a/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java +++ b/server/src/main/java/org/elasticsearch/common/io/stream/StreamOutput.java @@ -13,6 +13,8 @@ import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.geo.GeoPoint; @@ -47,6 +49,7 @@ import java.util.function.IntFunction; import static java.util.Map.entry; +import static org.elasticsearch.TransportVersions.V_8_500_070; /** * A stream from another node to this node. Technically, it can also be streamed from a byte array but that is mostly for testing. 
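Before moving to the write side: the consumer-based readCollection added above lets a caller accumulate into any container that can be sized up front. A sketch reading vints into an ArrayDeque; the demo class is hypothetical, the StreamInput/StreamOutput calls come from this change:

    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.StreamInput;

    import java.io.IOException;
    import java.util.ArrayDeque;
    import java.util.List;

    public class ReadCollectionDemo {
        public static void main(String[] args) throws IOException {
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                out.writeCollection(List.of(1, 2, 3), (o, v) -> o.writeVInt(v));
                StreamInput in = out.bytes().streamInput();
                // the constructor receives the decoded size; the consumer runs once per element
                ArrayDeque<Integer> deque = in.readCollection(ArrayDeque::new, (stream, d) -> d.add(stream.readVInt()));
                assert deque.size() == 3;
            }
        }
    }
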
@@ -502,10 +505,7 @@ public void writeStringArrayNullable(@Nullable String[] array) throws IOExceptio if (array == null) { writeVInt(0); } else { - writeVInt(array.length); - for (String s : array) { - writeString(s); - } + writeStringArray(array); } } @@ -552,13 +552,10 @@ public void writeMapWithConsistentOrder(@Nullable Map assert false == (map instanceof LinkedHashMap); this.writeByte((byte) 10); this.writeVInt(map.size()); - Iterator> iterator = map.entrySet() - .stream() - .sorted((a, b) -> a.getKey().compareTo(b.getKey())) - .iterator(); + Iterator> iterator = map.entrySet().stream().sorted(Map.Entry.comparingByKey()).iterator(); while (iterator.hasNext()) { Map.Entry next = iterator.next(); - if (this.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (this.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { this.writeGenericValue(next.getKey()); } else { this.writeString(next.getKey()); @@ -578,34 +575,14 @@ public final void writeMapValues(final Map map, final Writer valueW * Writes values of a map as a collection */ public final void writeMapValues(final Map map) throws IOException { - writeMapValues(map, (o, v) -> v.writeTo(o)); - } - - /** - * Write a {@link Map} of {@code K}-type keys to {@code V}-type {@link List}s. - *
<pre><code>
-     * Map&lt;String, List&lt;String&gt;&gt; map = ...;
-     * out.writeMapOfLists(map, StreamOutput::writeString, StreamOutput::writeString);
-     * </code></pre>
- * - * @param keyWriter The key writer - * @param valueWriter The value writer - */ - public final void writeMapOfLists(final Map> map, final Writer keyWriter, final Writer valueWriter) - throws IOException { - writeMap(map, keyWriter, (stream, list) -> { - writeVInt(list.size()); - for (final V value : list) { - valueWriter.write(this, value); - } - }); + writeMapValues(map, StreamOutput::writeWriteable); } /** * Write a {@link Map} of {@code K}-type keys to {@code V}-type. */ public final void writeMap(final Map map) throws IOException { - writeMap(map, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o)); + writeMap(map, StreamOutput::writeWriteable, StreamOutput::writeWriteable); } /** @@ -626,6 +603,13 @@ public final void writeMap(final Map map, final Writer keyWriter } } + /** + * Same as {@link #writeMap(Map, Writer, Writer)} but for {@code String} keys. + */ + public final void writeMap(final Map map, final Writer valueWriter) throws IOException { + writeMap(map, StreamOutput::writeString, valueWriter); + } + /** * Writes an {@link Instant} to the stream with nanosecond resolution */ @@ -678,10 +662,7 @@ public final void writeOptionalInstant(@Nullable Instant instant) throws IOExcep entry(Object[].class, (o, v) -> { o.writeByte((byte) 8); final Object[] list = (Object[]) v; - o.writeVInt(list.length); - for (Object item : list) { - o.writeGenericValue(item); - } + o.writeArray(StreamOutput::writeGenericValue, list); }), entry(Map.class, (o, v) -> { if (v instanceof LinkedHashMap) { @@ -689,13 +670,13 @@ public final void writeOptionalInstant(@Nullable Instant instant) throws IOExcep } else { o.writeByte((byte) 10); } - if (o.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (o.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { final Map map = (Map) v; o.writeMap(map, StreamOutput::writeGenericValue, StreamOutput::writeGenericValue); } else { @SuppressWarnings("unchecked") final Map map = (Map) v; - o.writeMap(map, StreamOutput::writeString, StreamOutput::writeGenericValue); + o.writeMap(map, StreamOutput::writeGenericValue); } }), entry(Byte.class, (o, v) -> { @@ -782,6 +763,24 @@ public final void writeOptionalInstant(@Nullable Instant instant) throws IOExcep o.writeInt(period.getYears()); o.writeInt(period.getMonths()); o.writeInt(period.getDays()); + }), + entry(GenericNamedWriteable.class, (o, v) -> { + // Note that we do not rely on the checks in VersionCheckingStreamOutput because that only applies to CCS + final var genericNamedWriteable = (GenericNamedWriteable) v; + TransportVersion minSupportedVersion = genericNamedWriteable.getMinimalSupportedVersion(); + assert minSupportedVersion.onOrAfter(V_8_500_070) : "[GenericNamedWriteable] requires [" + V_8_500_070 + "]"; + if (o.getTransportVersion().before(minSupportedVersion)) { + final var message = Strings.format( + "[%s] requires minimal transport version [%s] and cannot be sent using transport version [%s]", + genericNamedWriteable.getWriteableName(), + minSupportedVersion, + o.getTransportVersion() + ); + assert false : message; + throw new IllegalStateException(message); + } + o.writeByte((byte) 30); + o.writeNamedWriteable(genericNamedWriteable); }) ); @@ -812,6 +811,8 @@ private static Class getGenericType(Object value) { return Set.class; } else if (value instanceof BytesReference) { return BytesReference.class; + } else if (value instanceof GenericNamedWriteable) { + return GenericNamedWriteable.class; } else { return value.getClass(); } @@ -951,7 +952,7 @@ public void 
writeOptionalArray(final Writer writer, final @Nullable T[] a * integer is first written to the stream, and then the elements of the array are written to the stream. */ public void writeArray(T[] array) throws IOException { - writeArray((out, value) -> value.writeTo(out), array); + writeArray(StreamOutput::writeWriteable, array); } /** @@ -959,7 +960,7 @@ public void writeArray(T[] array) throws IOException { * serialized to indicate whether the array was null or not. */ public void writeOptionalArray(@Nullable T[] array) throws IOException { - writeOptionalArray((out, value) -> value.writeTo(out), array); + writeOptionalArray(StreamOutput::writeWriteable, array); } public void writeOptionalWriteable(@Nullable Writeable writeable) throws IOException { @@ -1031,28 +1032,16 @@ public void writeOptionalZoneId(@Nullable ZoneId timeZone) throws IOException { } /** - * Writes a collection to this stream. The corresponding collection can be read from a stream input using - * {@link StreamInput#readList(Writeable.Reader)}. - * - * @param collection the collection to write to this stream - * @throws IOException if an I/O exception occurs writing the collection + * Writes a collection which can then be read using {@link StreamInput#readCollectionAsList} or another {@code readCollectionAs*} + * method. Make sure to read the collection back into the same type as was originally written. */ public void writeCollection(final Collection collection) throws IOException { - writeCollection(collection, (o, v) -> v.writeTo(o)); - } - - /** - * Writes a list of {@link Writeable} objects - */ - public void writeList(List list) throws IOException { - writeCollection(list); + writeCollection(collection, StreamOutput::writeWriteable); } /** - * Writes a collection of objects via a {@link Writer}. - * - * @param collection the collection of objects - * @throws IOException if an I/O exception occurs writing the collection + * Writes a collection which can then be read using {@link StreamInput#readCollectionAsList} or another {@code readCollectionAs*} + * method. Make sure to read the collection back into the same type as was originally written. */ public void writeCollection(final Collection collection, final Writer writer) throws IOException { writeVInt(collection.size()); @@ -1062,29 +1051,24 @@ public void writeCollection(final Collection collection, final Writer } /** - * Writes a collection of a strings. The corresponding collection can be read from a stream input using - * {@link StreamInput#readList(Writeable.Reader)}. - * - * @param collection the collection of strings - * @throws IOException if an I/O exception occurs writing the collection + * Writes a collection of strings which can then be read using {@link StreamInput#readStringCollectionAsList} or another {@code + * readStringCollectionAs*} method. Make sure to read the collection back into the same type as was originally written. */ public void writeStringCollection(final Collection collection) throws IOException { writeCollection(collection, StreamOutput::writeString); } /** - * Writes an optional collection. The corresponding collection can be read from a stream input using - * {@link StreamInput#readOptionalList(Writeable.Reader)}. + * Writes a possibly-{@code null} collection which can then be read using {@link StreamInput#readOptionalCollectionAsList}. 
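The now-explicit @Nullable contract on optional collections, as a round trip against an in-memory stream (a sketch; run with -ea for the asserts):

    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.StreamInput;

    import java.io.IOException;
    import java.util.List;

    public class OptionalCollectionDemo {
        public static void main(String[] args) throws IOException {
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                out.writeOptionalStringCollection(null);          // a single "absent" boolean
                out.writeOptionalStringCollection(List.of("x"));  // "present" boolean + size + items

                StreamInput in = out.bytes().streamInput();
                assert in.readOptionalStringCollectionAsList() == null;
                assert List.of("x").equals(in.readOptionalStringCollectionAsList());
            }
        }
    }
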
*/ - public void writeOptionalCollection(final Collection collection) throws IOException { - writeOptionalCollection(collection, (o, v) -> v.writeTo(o)); + public void writeOptionalCollection(@Nullable final Collection collection) throws IOException { + writeOptionalCollection(collection, StreamOutput::writeWriteable); } /** - * Writes an optional collection via {@link Writer}. The corresponding collection can be read from a stream input using - * {@link StreamInput#readOptionalList(Writeable.Reader)}. + * Writes a possibly-{@code null} collection which can then be read using {@link StreamInput#readOptionalCollectionAsList}. */ - public void writeOptionalCollection(final Collection collection, final Writer writer) throws IOException { + public void writeOptionalCollection(@Nullable final Collection collection, final Writer writer) throws IOException { if (collection != null) { writeBoolean(true); writeCollection(collection, writer); @@ -1094,24 +1078,19 @@ public void writeOptionalCollection(final Collection collection, final Wr } /** - * Writes an optional collection of a strings. The corresponding collection can be read from a stream input using - * {@link StreamInput#readList(Writeable.Reader)}. - * - * @param collection the collection of strings - * @throws IOException if an I/O exception occurs writing the collection + * Writes a possibly-{@code null} collection of strings which can then be read using + * {@link StreamInput#readOptionalStringCollectionAsList}. */ - public void writeOptionalStringCollection(final Collection collection) throws IOException { + public void writeOptionalStringCollection(@Nullable final Collection collection) throws IOException { writeOptionalCollection(collection, StreamOutput::writeString); } /** - * Writes a list of {@link NamedWriteable} objects. + * Writes a collection of {@link NamedWriteable} objects which can then be read using {@link + * StreamInput#readNamedWriteableCollectionAsList}. 
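writeNamedWriteableCollection pairs with readNamedWriteableCollectionAsList, which only works on a registry-aware stream; a sketch, with registry construction elided:

    import org.elasticsearch.common.io.stream.BytesStreamOutput;
    import org.elasticsearch.common.io.stream.NamedWriteable;
    import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
    import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
    import org.elasticsearch.common.io.stream.StreamInput;

    import java.io.IOException;
    import java.util.List;

    public class NamedWriteableCollectionRoundTrip {
        // the registry must contain an entry for each concrete type written
        static List<NamedWriteable> roundTrip(List<NamedWriteable> items, NamedWriteableRegistry registry) throws IOException {
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                out.writeNamedWriteableCollection(items); // vint size, then (name, payload) per item
                StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry);
                // a plain StreamInput would throw UnsupportedOperationException here
                return in.readNamedWriteableCollectionAsList(NamedWriteable.class);
            }
        }
    }
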
*/ - public void writeNamedWriteableList(List list) throws IOException { - writeVInt(list.size()); - for (NamedWriteable obj : list) { - writeNamedWriteable(obj); - } + public void writeNamedWriteableCollection(Collection list) throws IOException { + writeCollection(list, StreamOutput::writeNamedWriteable); } /** diff --git a/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java b/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java index 6a7e685b9d4f2..d46e54de8729e 100644 --- a/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java +++ b/server/src/main/java/org/elasticsearch/common/network/NetworkModule.java @@ -49,6 +49,7 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.concurrent.Executor; import java.util.function.BiConsumer; import java.util.function.Supplier; @@ -262,7 +263,7 @@ private CompositeTransportInterceptor(List transportInterc @Override public TransportRequestHandler interceptHandler( String action, - String executor, + Executor executor, boolean forceExecution, TransportRequestHandler actualHandler ) { diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 01e4824579b90..4cd2ff52cf29a 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -72,6 +72,8 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexMetadata.INDEX_FORMAT_SETTING, IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME, IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_UUID, + IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_NAME, + IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_UUID, IndexMetadata.INDEX_DOWNSAMPLE_STATUS, SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_DEBUG_SETTING, SearchSlowLog.INDEX_SEARCH_SLOWLOG_THRESHOLD_FETCH_WARN_SETTING, diff --git a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java index 4a00cc1250461..6bdec2380c344 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java +++ b/server/src/main/java/org/elasticsearch/common/settings/KeyStoreWrapper.java @@ -637,7 +637,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(hasPassword); out.writeOptionalByteArray(dataBytes); var entriesMap = entries.get(); - out.writeMap((entriesMap == null) ? Map.of() : entriesMap, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + out.writeMap((entriesMap == null) ? Map.of() : entriesMap, StreamOutput::writeWriteable); out.writeBoolean(closed); } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java b/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java index ddbf5ec2bbc96..0b7799ae572a5 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java +++ b/server/src/main/java/org/elasticsearch/common/settings/LocallyMountedSecrets.java @@ -264,7 +264,7 @@ public static LocalFileSecrets readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { assert out.getTransportVersion() == TransportVersion.current(); - out.writeMap((entries == null) ? 
Map.of() : entries, StreamOutput::writeString, StreamOutput::writeByteArray); + out.writeMap((entries == null) ? Map.of() : entries, StreamOutput::writeByteArray); metadata.writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/Settings.java b/server/src/main/java/org/elasticsearch/common/settings/Settings.java index 67e2c0e1ac748..be8292f02bb59 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/Settings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/Settings.java @@ -645,7 +645,7 @@ public Diff diff(Settings previousState) { @Override public void writeTo(StreamOutput out) throws IOException { // pull settings to exclude secure settings in size() - out.writeMap(settings, StreamOutput::writeString, Settings::writeSettingValue); + out.writeMap(settings, Settings::writeSettingValue); } private static void writeSettingValue(StreamOutput streamOutput, Object value) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/common/unit/Processors.java b/server/src/main/java/org/elasticsearch/common/unit/Processors.java index 0e0f88ac7f9c5..d5421e1475655 100644 --- a/server/src/main/java/org/elasticsearch/common/unit/Processors.java +++ b/server/src/main/java/org/elasticsearch/common/unit/Processors.java @@ -9,6 +9,7 @@ package org.elasticsearch.common.unit; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -31,9 +32,9 @@ public class Processors implements Writeable, Comparable, ToXContent public static final Processors MAX_PROCESSORS = new Processors(Double.MAX_VALUE); public static final Version FLOAT_PROCESSORS_SUPPORT_VERSION = Version.V_8_3_0; - public static final TransportVersion FLOAT_PROCESSORS_SUPPORT_TRANSPORT_VERSION = TransportVersion.V_8_3_0; + public static final TransportVersion FLOAT_PROCESSORS_SUPPORT_TRANSPORT_VERSION = TransportVersions.V_8_3_0; public static final Version DOUBLE_PROCESSORS_SUPPORT_VERSION = Version.V_8_5_0; - public static final TransportVersion DOUBLE_PROCESSORS_SUPPORT_TRANSPORT_VERSION = TransportVersion.V_8_5_0; + public static final TransportVersion DOUBLE_PROCESSORS_SUPPORT_TRANSPORT_VERSION = TransportVersions.V_8_5_0; static final int NUMBER_OF_DECIMAL_PLACES = 5; private static final double MIN_REPRESENTABLE_PROCESSORS = 1E-5; diff --git a/server/src/main/java/org/elasticsearch/common/util/BitArray.java b/server/src/main/java/org/elasticsearch/common/util/BitArray.java index 051160a81d1b0..696e81b3beec9 100644 --- a/server/src/main/java/org/elasticsearch/common/util/BitArray.java +++ b/server/src/main/java/org/elasticsearch/common/util/BitArray.java @@ -8,6 +8,8 @@ package org.elasticsearch.common.util; +import org.apache.lucene.util.Accountable; +import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -17,7 +19,10 @@ * The underlying long array grows lazily based on the biggest index * that needs to be set. 
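Since BitArray is now Accountable, callers can surface or charge its heap usage; a sketch assuming the (initialSize, BigArrays) constructor order and the non-recycling BigArrays instance commonly used in tests:

    import org.elasticsearch.common.util.BigArrays;
    import org.elasticsearch.common.util.BitArray;

    public class BitArrayAccounting {
        public static void main(String[] args) {
            try (BitArray bits = new BitArray(1, BigArrays.NON_RECYCLING_INSTANCE)) {
                bits.set(12_345); // the backing LongArray grows lazily to cover the biggest index set
                // shallow instance size plus the RamUsageEstimator size of the backing bits
                System.out.println("BitArray uses ~" + bits.ramBytesUsed() + " bytes");
            }
        }
    }
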
*/ -public final class BitArray implements Releasable { +public final class BitArray implements Accountable, Releasable { + + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BitArray.class); + private final BigArrays bigArrays; private LongArray bits; @@ -132,6 +137,11 @@ private static long bitmask(long index) { return 1L << index; } + @Override + public long ramBytesUsed() { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(bits); + } + @Override public void close() { Releasables.close(bits); diff --git a/server/src/main/java/org/elasticsearch/common/util/Maps.java b/server/src/main/java/org/elasticsearch/common/util/Maps.java index 5fcdde4e0b579..da5089983ceb5 100644 --- a/server/src/main/java/org/elasticsearch/common/util/Maps.java +++ b/server/src/main/java/org/elasticsearch/common/util/Maps.java @@ -151,9 +151,29 @@ public static boolean deepEquals(Map left, Map right) { if (left == null || right == null || left.size() != right.size()) { return false; } - return left.entrySet() - .stream() - .allMatch(e -> right.containsKey(e.getKey()) && Objects.deepEquals(e.getValue(), right.get(e.getKey()))); + + for (Map.Entry e : left.entrySet()) { + if (right.containsKey(e.getKey()) == false) { + return false; + } + + V v1 = e.getValue(); + V v2 = right.get(e.getKey()); + if (v1 instanceof Map && v2 instanceof Map) { + // if the values are both maps, then recursively compare them with Maps.deepEquals + @SuppressWarnings("unchecked") + Map m1 = (Map) v1; + @SuppressWarnings("unchecked") + Map m2 = (Map) v2; + if (Maps.deepEquals(m1, m2) == false) { + return false; + } + } else if (Objects.deepEquals(v1, v2) == false) { + return false; + } + } + + return true; } /** diff --git a/server/src/main/java/org/elasticsearch/common/util/SetBackedScalingCuckooFilter.java b/server/src/main/java/org/elasticsearch/common/util/SetBackedScalingCuckooFilter.java index 88566acd46ae1..66a4d20f89a4c 100644 --- a/server/src/main/java/org/elasticsearch/common/util/SetBackedScalingCuckooFilter.java +++ b/server/src/main/java/org/elasticsearch/common/util/SetBackedScalingCuckooFilter.java @@ -111,9 +111,9 @@ public SetBackedScalingCuckooFilter(StreamInput in, Random rng) throws IOExcepti this.fpp = in.readDouble(); if (isSetMode) { - this.hashes = in.readSet(StreamInput::readZLong); + this.hashes = in.readCollectionAsSet(StreamInput::readZLong); } else { - this.filters = in.readList(in12 -> new CuckooFilter(in12, rng)); + this.filters = in.readCollectionAsList(in12 -> new CuckooFilter(in12, rng)); this.numBuckets = filters.get(0).getNumBuckets(); this.fingerprintMask = filters.get(0).getFingerprintMask(); this.bitsPerEntry = filters.get(0).getBitsPerEntry(); @@ -129,7 +129,7 @@ public void writeTo(StreamOutput out) throws IOException { if (isSetMode) { out.writeCollection(hashes, StreamOutput::writeZLong); } else { - out.writeList(filters); + out.writeCollection(filters); } } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractAsyncTask.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractAsyncTask.java index 031ff232351a8..add11b03c5f1c 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractAsyncTask.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/AbstractAsyncTask.java @@ -14,6 +14,7 @@ import java.io.Closeable; import java.util.Objects; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicBoolean; import static 
org.elasticsearch.core.Strings.format; @@ -25,6 +26,7 @@ public abstract class AbstractAsyncTask implements Runnable, Closeable { private final Logger logger; private final ThreadPool threadPool; + private final Executor executor; private final AtomicBoolean closed = new AtomicBoolean(false); private final boolean autoReschedule; private volatile Scheduler.Cancellable cancellable; @@ -32,9 +34,10 @@ public abstract class AbstractAsyncTask implements Runnable, Closeable { private volatile Exception lastThrownException; private volatile TimeValue interval; - protected AbstractAsyncTask(Logger logger, ThreadPool threadPool, TimeValue interval, boolean autoReschedule) { + protected AbstractAsyncTask(Logger logger, ThreadPool threadPool, Executor executor, TimeValue interval, boolean autoReschedule) { this.logger = logger; this.threadPool = threadPool; + this.executor = executor; this.interval = interval; this.autoReschedule = autoReschedule; } @@ -81,7 +84,7 @@ public synchronized void rescheduleIfNecessary() { if (logger.isTraceEnabled()) { logger.trace("scheduling {} every {}", toString(), interval); } - cancellable = threadPool.schedule(this, interval, getThreadPool()); + cancellable = threadPool.schedule(this, interval, executor); isScheduledOrRunning = true; } else { logger.trace("scheduled {} disabled", toString()); @@ -167,12 +170,4 @@ private static boolean sameException(Exception left, Exception right) { } protected abstract void runInternal(); - - /** - * Use the same threadpool by default. - * Derived classes can change this if required. - */ - protected String getThreadPool() { - return ThreadPool.Names.SAME; - } } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/RunOnce.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/RunOnce.java index e14f2d6463fa2..a43fc04cb0460 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/RunOnce.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/RunOnce.java @@ -7,23 +7,35 @@ */ package org.elasticsearch.common.util.concurrent; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.VarHandle; import java.util.Objects; -import java.util.concurrent.atomic.AtomicReference; /** * Runnable that prevents running its delegate more than once. */ public class RunOnce implements Runnable { - private final AtomicReference delegateRef; + private static final VarHandle VH_DELEGATE_FIELD; + + static { + try { + VH_DELEGATE_FIELD = MethodHandles.lookup().in(RunOnce.class).findVarHandle(RunOnce.class, "delegate", Runnable.class); + } catch (NoSuchFieldException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } + + @SuppressWarnings("FieldMayBeFinal") // updated via VH_DELEGATE_FIELD (and _only_ via VH_DELEGATE_FIELD) + private volatile Runnable delegate; public RunOnce(final Runnable delegate) { - delegateRef = new AtomicReference<>(Objects.requireNonNull(delegate)); + this.delegate = Objects.requireNonNull(delegate); } @Override public void run() { - var acquired = delegateRef.getAndSet(null); + var acquired = (Runnable) VH_DELEGATE_FIELD.compareAndExchange(this, delegate, null); if (acquired != null) { acquired.run(); } @@ -33,11 +45,11 @@ public void run() { * {@code true} if the {@link RunOnce} has been executed once. 
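The observable behaviour of RunOnce is unchanged by the VarHandle rewrite; compareAndExchange clears the delegate exactly once. For illustration:

    import org.elasticsearch.common.util.concurrent.RunOnce;

    public class RunOnceDemo {
        public static void main(String[] args) {
            RunOnce once = new RunOnce(() -> System.out.println("ran"));
            once.run();               // prints "ran" and nulls out the delegate
            once.run();               // no-op on every subsequent call
            assert once.hasRun();     // true once the delegate has been cleared
            System.out.println(once); // RunOnce[null]
        }
    }
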
*/ public boolean hasRun() { - return delegateRef.get() == null; + return delegate == null; } @Override public String toString() { - return "RunOnce[" + delegateRef.get() + "]"; + return "RunOnce[" + delegate + "]"; } } diff --git a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java index 551af04e1956f..b59ebc00e55c4 100644 --- a/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java +++ b/server/src/main/java/org/elasticsearch/common/util/concurrent/ThreadContext.java @@ -892,8 +892,8 @@ private void writeTo(StreamOutput out, Map defaultHeaders) throw requestHeaders.putAll(this.requestHeaders); } - out.writeMap(requestHeaders, StreamOutput::writeString, StreamOutput::writeString); - out.writeMap(responseHeaders, StreamOutput::writeString, StreamOutput::writeStringCollection); + out.writeMap(requestHeaders, StreamOutput::writeString); + out.writeMap(responseHeaders, StreamOutput::writeStringCollection); } } diff --git a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java index 3bd9cfee16775..4ac8f34571624 100644 --- a/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java +++ b/server/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java @@ -10,7 +10,7 @@ import org.elasticsearch.ElasticsearchGenerationException; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -643,7 +643,7 @@ public static BytesReference childBytes(XContentParser parser) throws IOExceptio * @param xContentType an instance to serialize */ public static void writeTo(StreamOutput out, XContentType xContentType) throws IOException { - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { // when sending an enumeration to > joinValidators = new ArrayList<>(); final Map> hostProviders = new HashMap<>(); @@ -210,7 +212,8 @@ public DiscoveryModule( circuitBreakerService, reconfigurator, leaderHeartbeatService, - preVoteCollectorFactory + preVoteCollectorFactory, + compatibilityVersions ); } else { throw new IllegalArgumentException("Unknown discovery type [" + discoveryType + "]"); diff --git a/server/src/main/java/org/elasticsearch/discovery/DiscoveryStats.java b/server/src/main/java/org/elasticsearch/discovery/DiscoveryStats.java index 8d1368555a72c..09f52887206fa 100644 --- a/server/src/main/java/org/elasticsearch/discovery/DiscoveryStats.java +++ b/server/src/main/java/org/elasticsearch/discovery/DiscoveryStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.discovery; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.coordination.PendingClusterStateStats; import org.elasticsearch.cluster.coordination.PublishClusterStateStats; import org.elasticsearch.cluster.service.ClusterApplierRecordingService; @@ -43,12 +43,12 @@ public DiscoveryStats( public DiscoveryStats(StreamInput in) throws IOException { queueStats = in.readOptionalWriteable(PendingClusterStateStats::new); publishStats = in.readOptionalWriteable(PublishClusterStateStats::new); - if 
(in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { clusterStateUpdateStats = in.readOptionalWriteable(ClusterStateUpdateStats::new); } else { clusterStateUpdateStats = null; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { applierRecordingStats = in.readOptionalWriteable(ClusterApplierRecordingService.Stats::new); } else { applierRecordingStats = null; @@ -59,10 +59,10 @@ public DiscoveryStats(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(queueStats); out.writeOptionalWriteable(publishStats); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { out.writeOptionalWriteable(clusterStateUpdateStats); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { out.writeOptionalWriteable(applierRecordingStats); } } diff --git a/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java b/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java index d82d15fe7c55c..046f0a1c64bb5 100644 --- a/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java +++ b/server/src/main/java/org/elasticsearch/discovery/PeerFinder.java @@ -312,7 +312,7 @@ private boolean handleWakeUp() { } }); - transportService.getThreadPool().scheduleUnlessShuttingDown(findPeersInterval, Names.CLUSTER_COORDINATION, new Runnable() { + transportService.getThreadPool().scheduleUnlessShuttingDown(findPeersInterval, clusterCoordinationExecutor, new Runnable() { @Override public void run() { synchronized (mutex) { diff --git a/server/src/main/java/org/elasticsearch/discovery/PeersRequest.java b/server/src/main/java/org/elasticsearch/discovery/PeersRequest.java index 78e35ebcf78a7..3f20c049f2957 100644 --- a/server/src/main/java/org/elasticsearch/discovery/PeersRequest.java +++ b/server/src/main/java/org/elasticsearch/discovery/PeersRequest.java @@ -30,14 +30,14 @@ public PeersRequest(DiscoveryNode sourceNode, List knownPeers) { public PeersRequest(StreamInput in) throws IOException { super(in); sourceNode = new DiscoveryNode(in); - knownPeers = in.readList(DiscoveryNode::new); + knownPeers = in.readCollectionAsList(DiscoveryNode::new); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); sourceNode.writeTo(out); - out.writeList(knownPeers); + out.writeCollection(knownPeers); } public List getKnownPeers() { diff --git a/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java b/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java index 758690e41699a..2ffadfb26a985 100644 --- a/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java +++ b/server/src/main/java/org/elasticsearch/gateway/ClusterStateUpdaters.java @@ -10,7 +10,6 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlocks; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -19,6 +18,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingTable; import 
org.elasticsearch.cluster.routing.ShardRoutingRoleStrategy; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.settings.ClusterSettings; import java.util.Map; @@ -29,10 +29,14 @@ public class ClusterStateUpdaters { private static final Logger logger = LogManager.getLogger(ClusterStateUpdaters.class); - public static ClusterState setLocalNode(ClusterState clusterState, DiscoveryNode localNode, TransportVersion transportVersion) { + public static ClusterState setLocalNode( + ClusterState clusterState, + DiscoveryNode localNode, + CompatibilityVersions compatibilityVersions + ) { return ClusterState.builder(clusterState) .nodes(DiscoveryNodes.builder().add(localNode).localNodeId(localNode.getId()).build()) - .putTransportVersion(localNode.getId(), transportVersion) + .compatibilityVersions(Map.of(localNode.getId(), compatibilityVersions)) .build(); } diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java index 8fe86cc1a2ada..335b63d656b1a 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayMetaState.java @@ -14,7 +14,6 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -28,6 +27,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.EsExecutors; @@ -97,7 +97,8 @@ public void start( IndexMetadataVerifier indexMetadataVerifier, MetadataUpgrader metadataUpgrader, PersistedClusterStateService persistedClusterStateService, - List clusterCoordinationPlugins + List clusterCoordinationPlugins, + CompatibilityVersions compatibilityVersions ) { assert persistedState.get() == null : "should only start once, but already have " + persistedState.get(); try { @@ -110,7 +111,8 @@ public void start( indexMetadataVerifier, metadataUpgrader, persistedClusterStateService, - clusterCoordinationPlugins + clusterCoordinationPlugins, + compatibilityVersions ) ); } catch (IOException e) { @@ -126,7 +128,8 @@ private PersistedState createPersistedState( IndexMetadataVerifier indexMetadataVerifier, MetadataUpgrader metadataUpgrader, PersistedClusterStateService persistedClusterStateService, - List clusterCoordinationPlugins + List clusterCoordinationPlugins, + CompatibilityVersions compatibilityVersions ) throws IOException { final var persistedStateFactories = clusterCoordinationPlugins.stream() .map(ClusterCoordinationPlugin::getPersistedStateFactory) @@ -149,11 +152,19 @@ private PersistedState createPersistedState( metaStateService, indexMetadataVerifier, metadataUpgrader, - persistedClusterStateService + persistedClusterStateService, + compatibilityVersions ); } - return createInMemoryPersistedState(settings, transportService, clusterService, metaStateService, persistedClusterStateService); + return createInMemoryPersistedState( + settings, + transportService, + clusterService, + metaStateService, 
+ persistedClusterStateService, + compatibilityVersions + ); } private PersistedState createOnDiskPersistedState( @@ -163,7 +174,8 @@ private PersistedState createOnDiskPersistedState( MetaStateService metaStateService, IndexMetadataVerifier indexMetadataVerifier, MetadataUpgrader metadataUpgrader, - PersistedClusterStateService persistedClusterStateService + PersistedClusterStateService persistedClusterStateService, + CompatibilityVersions compatibilityVersions ) throws IOException { final PersistedClusterStateService.OnDiskState onDiskState = persistedClusterStateService.loadBestOnDiskState(); @@ -190,7 +202,8 @@ private PersistedState createOnDiskPersistedState( ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)) .version(lastAcceptedVersion) .metadata(upgradeMetadataForNode(metadata, indexMetadataVerifier, metadataUpgrader)) - .build() + .build(), + compatibilityVersions ); if (DiscoveryNode.isMasterNode(settings)) { persistedState = new LucenePersistedState(persistedClusterStateService, currentTerm, clusterState); @@ -226,13 +239,15 @@ private PersistedState createInMemoryPersistedState( TransportService transportService, ClusterService clusterService, MetaStateService metaStateService, - PersistedClusterStateService persistedClusterStateService + PersistedClusterStateService persistedClusterStateService, + CompatibilityVersions compatibilityVersions ) throws IOException { final long currentTerm = 0L; final ClusterState clusterState = prepareInitialClusterState( transportService, clusterService, - ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)).build() + ClusterState.builder(ClusterName.CLUSTER_NAME_SETTING.get(settings)).build(), + compatibilityVersions ); if (persistedClusterStateService.getDataPaths().length > 0) { // write empty cluster state just so that we have a persistent node id. 
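prepareInitialClusterState, reworked just below, keeps its updater pipeline as a Function.identity().andThen(...) chain and now threads the node's CompatibilityVersions into the setLocalNode step instead of a bare TransportVersion.current(). A toy sketch of that composition idiom, with String standing in for ClusterState:

    import java.util.function.Function;

    // Each andThen(...) stage plays the role of one ClusterStateUpdaters step; stages run in order.
    public class UpdaterChainSketch {
        public static void main(String[] args) {
            Function<String, String> chain = Function.<String>identity()
                .andThen(state -> state + " +stateNotRecoveredBlock")
                .andThen(state -> state + " +localNodeAndCompatibilityVersions")
                .andThen(state -> state + " +recoveredClusterBlocks");
            // Prints: state0 +stateNotRecoveredBlock +localNodeAndCompatibilityVersions +recoveredClusterBlocks
            System.out.println(chain.apply("state0"));
        }
    }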
There is no need to write out global metadata with @@ -252,12 +267,17 @@ private PersistedState createInMemoryPersistedState( } // exposed so it can be overridden by tests - ClusterState prepareInitialClusterState(TransportService transportService, ClusterService clusterService, ClusterState clusterState) { + ClusterState prepareInitialClusterState( + TransportService transportService, + ClusterService clusterService, + ClusterState clusterState, + CompatibilityVersions compatibilityVersions + ) { assert clusterState.nodes().getLocalNode() == null : "prepareInitialClusterState must only be called once"; assert transportService.getLocalNode() != null : "transport service is not yet started"; return Function.identity() .andThen(ClusterStateUpdaters::addStateNotRecoveredBlock) - .andThen(state -> ClusterStateUpdaters.setLocalNode(state, transportService.getLocalNode(), TransportVersion.current())) + .andThen(state -> ClusterStateUpdaters.setLocalNode(state, transportService.getLocalNode(), compatibilityVersions)) .andThen(state -> ClusterStateUpdaters.upgradeAndArchiveUnknownOrInvalidSettings(state, clusterService.getClusterSettings())) .andThen(ClusterStateUpdaters::recoverClusterBlocks) .apply(clusterState); diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java index 3bf6e0f15164e..bb0ffe0ee1c8d 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayService.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayService.java @@ -188,7 +188,7 @@ protected void doRun() { runRecovery(); } } - }, recoverAfterTime, ThreadPool.Names.GENERIC); + }, recoverAfterTime, threadPool.generic()); } } else { if (recoveryInProgress.compareAndSet(false, true)) { diff --git a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java index 89050af671dd0..d0579342f5bd0 100644 --- a/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java +++ b/server/src/main/java/org/elasticsearch/gateway/LocalAllocateDangledIndices.java @@ -10,7 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.support.ChannelActionListener; @@ -273,7 +273,7 @@ public void writeTo(StreamOutput out) throws IOException { public static class AllocateDangledResponse extends TransportResponse { private AllocateDangledResponse(StreamInput in) throws IOException { - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { in.readBoolean(); } } @@ -282,7 +282,7 @@ private AllocateDangledResponse() {} @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeBoolean(true); } } diff --git a/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java b/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java index ec374ade8a643..855eb76a07add 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java +++ 
b/server/src/main/java/org/elasticsearch/gateway/MetadataStateFormat.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Tuple; +import org.elasticsearch.transport.Transports; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -222,6 +223,7 @@ public final long write(final T state, final Path... locations) throws WriteStat } private long write(final T state, boolean cleanup, final Path... locations) throws WriteStateException { + assert Transports.assertNotTransportThread("MetadataStateFormat#write does IO and must not run on transport thread"); if (locations == null) { throw new IllegalArgumentException("Locations must not be null"); } diff --git a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java index 3b594fa83e70a..0244e3bf77d60 100644 --- a/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java +++ b/server/src/main/java/org/elasticsearch/gateway/TransportNodesListGatewayStartedShards.java @@ -11,7 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; @@ -190,7 +190,7 @@ public static class Request extends BaseNodesRequest { public Request(StreamInput in) throws IOException { super(in); shardId = new ShardId(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) { customDataPath = in.readString(); } else { customDataPath = null; @@ -221,7 +221,7 @@ public String getCustomDataPath() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); shardId.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) { out.writeString(customDataPath); } } @@ -243,12 +243,12 @@ public NodesGatewayStartedShards( @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeGatewayStartedShards::new); + return in.readCollectionAsList(NodeGatewayStartedShards::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } } @@ -261,7 +261,7 @@ public static class NodeRequest extends TransportRequest { public NodeRequest(StreamInput in) throws IOException { super(in); shardId = new ShardId(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) { customDataPath = in.readString(); } else { customDataPath = null; @@ -277,7 +277,7 @@ public NodeRequest(Request request) { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); shardId.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) { assert customDataPath != null; out.writeString(customDataPath); } diff 
--git a/server/src/main/java/org/elasticsearch/health/HealthIndicatorService.java b/server/src/main/java/org/elasticsearch/health/HealthIndicatorService.java index 867df43aece32..29d38e863f5cf 100644 --- a/server/src/main/java/org/elasticsearch/health/HealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/health/HealthIndicatorService.java @@ -55,4 +55,15 @@ default HealthIndicatorResult createIndicator( .collect(Collectors.toList()); return new HealthIndicatorResult(name(), status, symptom, details, impactsList, diagnosisList); } + + /** + * A preflight indicator is an indicator that is run first and represents a serious cascading health problem. For example, the + * `stable_master` health indicator is a preflight indicator. When it is red it means that the node has witnessed too many master nodes + * which could mean there are missing nodes, or a discovery problem, or that the node itself has problems joining the elected master. + * For these reasons, it is likely that the cluster state is not up-to-date enough for us to make health decisions off of it. + * @return true if this is a preflight indicator, false otherwise. + */ + default boolean isPreflight() { + return false; + } } diff --git a/server/src/main/java/org/elasticsearch/health/HealthService.java b/server/src/main/java/org/elasticsearch/health/HealthService.java index 7828f12e7aba2..f4b71247d6291 100644 --- a/server/src/main/java/org/elasticsearch/health/HealthService.java +++ b/server/src/main/java/org/elasticsearch/health/HealthService.java @@ -46,28 +46,23 @@ public class HealthService { */ private static final String REASON = "reasons"; + // Indicators that are run first and represent a serious cascading health problem private final List preflightHealthIndicatorServices; + // Indicators that are run if the preflight indicators return GREEN results private final List healthIndicatorServices; private final ThreadPool threadPool; /** * Creates a new HealthService. - * + * Accepts a list of regular indicator services and a list of preflight indicator services. Preflight indicators are run first and * represent serious cascading health problems. If any of these preflight indicators are not GREEN status, all remaining indicators are * likely to be degraded in some way or will not be able to calculate their state correctly. The remaining health indicators will return * UNKNOWN statuses in this case. - * - * @param preflightHealthIndicatorServices indicators that are run first and represent a serious cascading health problem. - * @param healthIndicatorServices indicators that are run if the preflight indicators return GREEN results.
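The single-list constructor added just below replaces the two-list signature being removed here: callers now pass every HealthIndicatorService in one list, and HealthService partitions it by the new isPreflight() flag. A toy sketch of that partitioning, with stand-in names rather than the real classes:

    import java.util.List;

    // Stand-in for HealthIndicatorService: name() is the only abstract method.
    interface IndicatorSketch {
        String name();

        default boolean isPreflight() {
            return false;
        }
    }

    public class PreflightPartitionSketch {
        public static void main(String[] args) {
            IndicatorSketch stableMaster = new IndicatorSketch() {
                public String name() { return "stable_master"; }
                public boolean isPreflight() { return true; } // serious cascading problem, runs first
            };
            IndicatorSketch disk = () -> "disk"; // regular indicator, keeps the default of false

            List<IndicatorSketch> all = List.of(stableMaster, disk);
            List<IndicatorSketch> preflight = all.stream().filter(IndicatorSketch::isPreflight).toList();
            List<IndicatorSketch> regular = all.stream().filter(i -> i.isPreflight() == false).toList();
            System.out.println(preflight.size() + " preflight, " + regular.size() + " regular"); // 1 preflight, 1 regular
        }
    }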
*/ - public HealthService( - List preflightHealthIndicatorServices, - List healthIndicatorServices, - ThreadPool threadPool - ) { - this.preflightHealthIndicatorServices = preflightHealthIndicatorServices; - this.healthIndicatorServices = healthIndicatorServices; + public HealthService(List healthIndicatorServices, ThreadPool threadPool) { + this.preflightHealthIndicatorServices = healthIndicatorServices.stream().filter(HealthIndicatorService::isPreflight).toList(); + this.healthIndicatorServices = healthIndicatorServices.stream().filter(indicator -> indicator.isPreflight() == false).toList(); this.threadPool = threadPool; } diff --git a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java index 9ae58b5ef849e..6577171202169 100644 --- a/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java +++ b/server/src/main/java/org/elasticsearch/health/metadata/HealthMetadata.java @@ -9,6 +9,7 @@ package org.elasticsearch.health.metadata; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NamedDiff; @@ -62,7 +63,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_5_0; + return TransportVersions.V_8_5_0; } @Override @@ -161,7 +162,7 @@ public record ShardLimits(int maxShardsPerNode, int maxShardsPerNodeFrozen) impl private static final String TYPE = "shard_limits"; private static final ParseField MAX_SHARDS_PER_NODE = new ParseField("max_shards_per_node"); private static final ParseField MAX_SHARDS_PER_NODE_FROZEN = new ParseField("max_shards_per_node_frozen"); - static final TransportVersion VERSION_SUPPORTING_SHARD_LIMIT_FIELDS = TransportVersion.V_8_8_0; + static final TransportVersion VERSION_SUPPORTING_SHARD_LIMIT_FIELDS = TransportVersions.V_8_8_0; static ShardLimits readFrom(StreamInput in) throws IOException { return new ShardLimits(in.readInt(), in.readInt()); @@ -230,7 +231,7 @@ public record Disk( ) implements ToXContentFragment, Writeable { public static final String TYPE = "disk"; - public static final TransportVersion VERSION_SUPPORTING_HEADROOM_FIELDS = TransportVersion.V_8_5_0; + public static final TransportVersion VERSION_SUPPORTING_HEADROOM_FIELDS = TransportVersions.V_8_5_0; private static final ParseField HIGH_WATERMARK_FIELD = new ParseField("high_watermark"); private static final ParseField HIGH_MAX_HEADROOM_FIELD = new ParseField("high_max_headroom"); diff --git a/server/src/main/java/org/elasticsearch/health/node/HealthInfo.java b/server/src/main/java/org/elasticsearch/health/node/HealthInfo.java index f83d5916a792e..e8e9dd9747a9f 100644 --- a/server/src/main/java/org/elasticsearch/health/node/HealthInfo.java +++ b/server/src/main/java/org/elasticsearch/health/node/HealthInfo.java @@ -28,6 +28,6 @@ public HealthInfo(StreamInput input) throws IOException { @Override public void writeTo(StreamOutput output) throws IOException { - output.writeMap(diskInfoByNode, StreamOutput::writeString, (out, diskHealthInfo) -> diskHealthInfo.writeTo(out)); + output.writeMap(diskInfoByNode, StreamOutput::writeWriteable); } } diff --git a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java index e1ea7728acf51..8d25233a1d6e5 
100644 --- a/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java +++ b/server/src/main/java/org/elasticsearch/health/node/LocalHealthMonitor.java @@ -45,6 +45,7 @@ import java.util.Objects; import java.util.Set; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicReference; import static org.elasticsearch.core.Strings.format; @@ -241,7 +242,7 @@ DiskHealthInfo getLastReportedDiskHealthInfo() { static class Monitoring implements Runnable, Scheduler.Cancellable { private final TimeValue interval; - private final String executor; + private final Executor executor; private final Scheduler scheduler; private final ClusterService clusterService; private final DiskCheck diskCheck; @@ -256,7 +257,7 @@ static class Monitoring implements Runnable, Scheduler.Cancellable { private Monitoring( TimeValue interval, Scheduler scheduler, - String executor, + Executor executor, AtomicReference lastReportedDiskHealthInfo, AtomicReference lastSeenHealthNode, DiskCheck diskCheck, @@ -278,7 +279,7 @@ private Monitoring( */ static Monitoring start( TimeValue interval, - Scheduler scheduler, + ThreadPool threadPool, AtomicReference lastReportedDiskHealthInfo, AtomicReference lastSeenHealthNode, DiskCheck diskCheck, @@ -287,15 +288,15 @@ static Monitoring start( ) { Monitoring monitoring = new Monitoring( interval, - scheduler, - ThreadPool.Names.MANAGEMENT, + threadPool, + threadPool.executor(ThreadPool.Names.MANAGEMENT), lastReportedDiskHealthInfo, lastSeenHealthNode, diskCheck, clusterService, client ); - monitoring.scheduledRun = scheduler.schedule(monitoring, TimeValue.ZERO, monitoring.executor); + monitoring.scheduledRun = threadPool.schedule(monitoring, TimeValue.ZERO, monitoring.executor); return monitoring; } diff --git a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskParams.java b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskParams.java index 4a68115af6348..a18f9c5c10b33 100644 --- a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskParams.java +++ b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskParams.java @@ -9,6 +9,7 @@ package org.elasticsearch.health.node.selection; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.persistent.PersistentTaskParams; @@ -47,7 +48,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_5_0; + return TransportVersions.V_8_5_0; } @Override diff --git a/server/src/main/java/org/elasticsearch/health/stats/HealthApiStatsAction.java b/server/src/main/java/org/elasticsearch/health/stats/HealthApiStatsAction.java index 25714f3992c72..61894beade382 100644 --- a/server/src/main/java/org/elasticsearch/health/stats/HealthApiStatsAction.java +++ b/server/src/main/java/org/elasticsearch/health/stats/HealthApiStatsAction.java @@ -89,12 +89,12 @@ public void writeTo(StreamOutput out) throws IOException { @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(Node::new); + return in.readCollectionAsList(Node::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } public Counters getStats() { diff --git 
a/server/src/main/java/org/elasticsearch/http/HttpStats.java b/server/src/main/java/org/elasticsearch/http/HttpStats.java index 9a77428b04535..2a8dcb6f6d8b6 100644 --- a/server/src/main/java/org/elasticsearch/http/HttpStats.java +++ b/server/src/main/java/org/elasticsearch/http/HttpStats.java @@ -31,14 +31,14 @@ public HttpStats(long serverOpen, long totalOpened) { } public HttpStats(StreamInput in) throws IOException { - this(in.readVLong(), in.readVLong(), in.readList(ClientStats::new)); + this(in.readVLong(), in.readVLong(), in.readCollectionAsList(ClientStats::new)); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(serverOpen); out.writeVLong(totalOpen); - out.writeList(clientStats); + out.writeCollection(clientStats); } public long getServerOpen() { diff --git a/server/src/main/java/org/elasticsearch/index/IndexService.java b/server/src/main/java/org/elasticsearch/index/IndexService.java index ded7e459403eb..05c6fd63c3fcb 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexService.java +++ b/server/src/main/java/org/elasticsearch/index/IndexService.java @@ -98,6 +98,7 @@ import java.util.Objects; import java.util.Set; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.BooleanSupplier; @@ -1062,8 +1063,8 @@ abstract static class BaseAsyncTask extends AbstractAsyncTask { protected final IndexService indexService; - BaseAsyncTask(final IndexService indexService, final TimeValue interval) { - super(indexService.logger, indexService.threadPool, interval, true); + BaseAsyncTask(final IndexService indexService, final Executor executor, final TimeValue interval) { + super(indexService.logger, indexService.threadPool, executor, interval, true); this.indexService = indexService; rescheduleIfNecessary(); } @@ -1082,12 +1083,11 @@ protected boolean mustReschedule() { static final class AsyncTranslogFSync extends BaseAsyncTask { AsyncTranslogFSync(IndexService indexService) { - super(indexService, indexService.getIndexSettings().getTranslogSyncInterval()); - } - - @Override - protected String getThreadPool() { - return ThreadPool.Names.FLUSH; + super( + indexService, + indexService.threadPool.executor(ThreadPool.Names.FLUSH), + indexService.getIndexSettings().getTranslogSyncInterval() + ); } @Override @@ -1111,7 +1111,11 @@ public String toString() { static final class AsyncRefreshTask extends BaseAsyncTask { AsyncRefreshTask(IndexService indexService) { - super(indexService, indexService.getIndexSettings().getRefreshInterval()); + super( + indexService, + indexService.threadPool.executor(ThreadPool.Names.REFRESH), + indexService.getIndexSettings().getRefreshInterval() + ); } @Override @@ -1119,11 +1123,6 @@ protected void runInternal() { indexService.maybeRefreshEngine(false); } - @Override - protected String getThreadPool() { - return ThreadPool.Names.REFRESH; - } - @Override public String toString() { return "refresh"; @@ -1135,6 +1134,7 @@ final class AsyncTrimTranslogTask extends BaseAsyncTask { AsyncTrimTranslogTask(IndexService indexService) { super( indexService, + threadPool.generic(), indexService.getIndexSettings() .getSettings() .getAsTime(INDEX_TRANSLOG_RETENTION_CHECK_INTERVAL_SETTING, TimeValue.timeValueMinutes(10)) @@ -1151,11 +1151,6 @@ protected void runInternal() { indexService.maybeTrimTranslog(); } - @Override - protected String getThreadPool() { - return ThreadPool.Names.GENERIC; 
- } - @Override public String toString() { return "trim_translog"; @@ -1187,7 +1182,11 @@ private static final class AsyncGlobalCheckpointTask extends BaseAsyncTask { AsyncGlobalCheckpointTask(final IndexService indexService) { // index.global_checkpoint_sync_interval is not a real setting, it is only registered in tests - super(indexService, GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.get(indexService.getIndexSettings().getSettings())); + super( + indexService, + indexService.getThreadPool().generic(), + GLOBAL_CHECKPOINT_SYNC_INTERVAL_SETTING.get(indexService.getIndexSettings().getSettings()) + ); } @Override @@ -1195,11 +1194,6 @@ protected void runInternal() { indexService.maybeSyncGlobalCheckpoints(); } - @Override - protected String getThreadPool() { - return ThreadPool.Names.GENERIC; - } - @Override public String toString() { return "global_checkpoint_sync"; @@ -1209,7 +1203,11 @@ public String toString() { private static final class AsyncRetentionLeaseSyncTask extends BaseAsyncTask { AsyncRetentionLeaseSyncTask(final IndexService indexService) { - super(indexService, RETENTION_LEASE_SYNC_INTERVAL_SETTING.get(indexService.getIndexSettings().getSettings())); + super( + indexService, + indexService.threadPool.executor(ThreadPool.Names.MANAGEMENT), + RETENTION_LEASE_SYNC_INTERVAL_SETTING.get(indexService.getIndexSettings().getSettings()) + ); } @Override @@ -1217,11 +1215,6 @@ protected void runInternal() { indexService.syncRetentionLeases(); } - @Override - protected String getThreadPool() { - return ThreadPool.Names.MANAGEMENT; - } - @Override public String toString() { return "retention_lease_sync"; diff --git a/server/src/main/java/org/elasticsearch/index/IndexVersion.java b/server/src/main/java/org/elasticsearch/index/IndexVersion.java index 417a22e2134a5..34f415e46462a 100644 --- a/server/src/main/java/org/elasticsearch/index/IndexVersion.java +++ b/server/src/main/java/org/elasticsearch/index/IndexVersion.java @@ -18,8 +18,6 @@ import org.elasticsearch.xcontent.ToXContentFragment; import org.elasticsearch.xcontent.XContentBuilder; -import java.io.IOException; - import java.io.IOException; import java.lang.reflect.Field; import java.util.Collection; @@ -67,8 +65,9 @@ public record IndexVersion(int id, Version luceneVersion) implements VersionId IDS = new HashMap<>(); private static IndexVersion registerIndexVersion(int id, Version luceneVersion, String uniqueId) { @@ -84,109 +83,74 @@ private static IndexVersion registerIndexVersion(int id, Version luceneVersion, public static final IndexVersion ZERO = registerIndexVersion(0, Version.LATEST, "00000000-0000-0000-0000-000000000000"); public static final IndexVersion V_7_0_0 = registerIndexVersion(7_00_00_99, Version.LUCENE_8_0_0, "b32be92d-c403-4858-a4a3-20d699a47ae6"); - public static final IndexVersion V_7_0_1 = registerIndexVersion(7_00_01_99, Version.LUCENE_8_0_0, "a03ed728-eac8-4e50-bcce-864806bb10e0"); public static final IndexVersion V_7_1_0 = registerIndexVersion(7_01_00_99, Version.LUCENE_8_0_0, "f9964d87-9f20-4b26-af32-be1f979216ec"); - public static final IndexVersion V_7_1_1 = registerIndexVersion(7_01_01_99, Version.LUCENE_8_0_0, "29a3fb69-55d0-4389-aea9-96c98ce23830"); public static final IndexVersion V_7_2_0 = registerIndexVersion(7_02_00_99, Version.LUCENE_8_0_0, "dba49448-87d4-45bb-ba19-f7b4eb85c757"); public static final IndexVersion V_7_2_1 = registerIndexVersion(7_02_01_99, Version.LUCENE_8_0_0, "58874b45-f9f8-4c04-92a9-67548a8b21c3"); public static final IndexVersion V_7_3_0 = registerIndexVersion(7_03_00_99, 
Version.LUCENE_8_1_0, "3d8a21df-58a4-4d7a-ba5d-438c92c16a7b"); - public static final IndexVersion V_7_3_1 = registerIndexVersion(7_03_01_99, Version.LUCENE_8_1_0, "5687797f-448b-490d-94d4-d7e8cfac0c98"); - public static final IndexVersion V_7_3_2 = registerIndexVersion(7_03_02_99, Version.LUCENE_8_1_0, "5a3462e5-d2fe-4b7b-9a7e-c0234412271f"); public static final IndexVersion V_7_4_0 = registerIndexVersion(7_04_00_99, Version.LUCENE_8_2_0, "c1fe73ba-0173-476c-aba2-855c2b31ac18"); - public static final IndexVersion V_7_4_1 = registerIndexVersion(7_04_01_99, Version.LUCENE_8_2_0, "8a917374-bd4f-45e3-9052-575c4cf741cd"); - public static final IndexVersion V_7_4_2 = registerIndexVersion(7_04_02_99, Version.LUCENE_8_2_0, "f073a867-cba2-41e4-8150-a2f2a96f1e0b"); public static final IndexVersion V_7_5_0 = registerIndexVersion(7_05_00_99, Version.LUCENE_8_3_0, "ab08ae25-ede2-4e57-a43f-89d96aa989e4"); - public static final IndexVersion V_7_5_1 = registerIndexVersion(7_05_01_99, Version.LUCENE_8_3_0, "a386d62e-cb85-4a37-b5f9-c9468bbfc457"); public static final IndexVersion V_7_5_2 = registerIndexVersion(7_05_02_99, Version.LUCENE_8_3_0, "706715ca-3b91-40d2-8c2e-c34c459b5d0d"); public static final IndexVersion V_7_6_0 = registerIndexVersion(7_06_00_99, Version.LUCENE_8_4_0, "63acbdb9-51c8-4976-bb3d-e55052a4fbd4"); - public static final IndexVersion V_7_6_1 = registerIndexVersion(7_06_01_99, Version.LUCENE_8_4_0, "1acc33d3-28dc-448d-953a-664dad3bf1f5"); - public static final IndexVersion V_7_6_2 = registerIndexVersion(7_06_02_99, Version.LUCENE_8_4_0, "3aa17069-fa04-4bf9-96af-fe8b903faa75"); public static final IndexVersion V_7_7_0 = registerIndexVersion(7_07_00_99, Version.LUCENE_8_5_1, "6fff8238-e6ce-4fb2-85de-196492026e49"); - public static final IndexVersion V_7_7_1 = registerIndexVersion(7_07_01_99, Version.LUCENE_8_5_1, "4ce6641d-157b-4c59-8261-7997ac0f6e40"); public static final IndexVersion V_7_8_0 = registerIndexVersion(7_08_00_99, Version.LUCENE_8_5_1, "81d7d459-f386-4c20-8235-f8fce8af7f0e"); - public static final IndexVersion V_7_8_1 = registerIndexVersion(7_08_01_99, Version.LUCENE_8_5_1, "a1b015bc-d020-453f-85a6-9413e169304a"); public static final IndexVersion V_7_9_0 = registerIndexVersion(7_09_00_99, Version.LUCENE_8_6_0, "0fa951a2-43ce-4f76-91bf-066c1ecf8a93"); - public static final IndexVersion V_7_9_1 = registerIndexVersion(7_09_01_99, Version.LUCENE_8_6_2, "5fc4aabc-080e-4840-af4f-a724deba98b1"); - public static final IndexVersion V_7_9_2 = registerIndexVersion(7_09_02_99, Version.LUCENE_8_6_2, "ef824617-332e-4b63-969e-ebb73a868462"); - public static final IndexVersion V_7_9_3 = registerIndexVersion(7_09_03_99, Version.LUCENE_8_6_2, "499c810a-0f37-4dfd-92ad-55e4936f3578"); public static final IndexVersion V_7_10_0 = registerIndexVersion(7_10_00_99, Version.LUCENE_8_7_0, "92ccd91c-0251-4263-8873-9f1abfac3c10"); - public static final IndexVersion V_7_10_1 = registerIndexVersion(7_10_01_99, Version.LUCENE_8_7_0, "8ce37467-964f-43eb-ad2d-a51a50116868"); - public static final IndexVersion V_7_10_2 = registerIndexVersion(7_10_02_99, Version.LUCENE_8_7_0, "cb277ccb-3081-4238-be2c-c3167316a435"); public static final IndexVersion V_7_11_0 = registerIndexVersion(7_11_00_99, Version.LUCENE_8_7_0, "e6d65f96-26d5-4669-ac5a-2964b9b1699f"); - public static final IndexVersion V_7_11_1 = registerIndexVersion(7_11_01_99, Version.LUCENE_8_7_0, "e3655b78-14f7-4432-aa28-34cd1ef2d229"); - public static final IndexVersion V_7_11_2 = registerIndexVersion(7_11_02_99, Version.LUCENE_8_7_0, 
"1ecfd0ee-4868-4384-b2a0-af6ecb01e496"); public static final IndexVersion V_7_12_0 = registerIndexVersion(7_12_00_99, Version.LUCENE_8_8_0, "39e2989a-a9a4-4f1a-b185-2e6015f74b1c"); - public static final IndexVersion V_7_12_1 = registerIndexVersion(7_12_01_99, Version.LUCENE_8_8_0, "a8307f67-8295-4567-a7eb-2a6e69046282"); public static final IndexVersion V_7_13_0 = registerIndexVersion(7_13_00_99, Version.LUCENE_8_8_2, "28b21fe0-4d1f-4c04-95cc-74df494ae0cf"); - public static final IndexVersion V_7_13_1 = registerIndexVersion(7_13_01_99, Version.LUCENE_8_8_2, "4952d7a7-d9f5-443b-b362-8c5ebdc57f81"); - public static final IndexVersion V_7_13_2 = registerIndexVersion(7_13_02_99, Version.LUCENE_8_8_2, "d77c4245-9d26-4da3-aa61-78ab34c3c792"); - public static final IndexVersion V_7_13_3 = registerIndexVersion(7_13_03_99, Version.LUCENE_8_8_2, "a263a47e-4075-4c68-8a42-15a37455c30f"); - public static final IndexVersion V_7_13_4 = registerIndexVersion(7_13_04_99, Version.LUCENE_8_8_2, "d17644c8-3144-495d-8f6c-42cd36698e98"); public static final IndexVersion V_7_14_0 = registerIndexVersion(7_14_00_99, Version.LUCENE_8_9_0, "b45bb223-bb73-4379-a46f-7dc74d38aaca"); - public static final IndexVersion V_7_14_1 = registerIndexVersion(7_14_01_99, Version.LUCENE_8_9_0, "ee4a6d62-9e05-490b-93dd-b316f9a62d71"); - public static final IndexVersion V_7_14_2 = registerIndexVersion(7_14_02_99, Version.LUCENE_8_9_0, "285d3293-2896-431c-97dd-180890840947"); public static final IndexVersion V_7_15_0 = registerIndexVersion(7_15_00_99, Version.LUCENE_8_9_0, "ab666b02-b866-4b64-9ba3-d511e86c55b5"); - public static final IndexVersion V_7_15_1 = registerIndexVersion(7_15_01_99, Version.LUCENE_8_9_0, "5643957d-9b68-414a-8917-ea75cf549f67"); - public static final IndexVersion V_7_15_2 = registerIndexVersion(7_15_02_99, Version.LUCENE_8_9_0, "1a618039-d665-47ce-b6ca-886e88c64051"); public static final IndexVersion V_7_16_0 = registerIndexVersion(7_16_00_99, Version.LUCENE_8_10_1, "a582e900-2d92-474c-9be3-2e08fa88be4b"); - public static final IndexVersion V_7_16_1 = registerIndexVersion(7_16_01_99, Version.LUCENE_8_10_1, "bf666306-9b0d-468b-99dc-f2706dae9c11"); - public static final IndexVersion V_7_16_2 = registerIndexVersion(7_16_02_99, Version.LUCENE_8_10_1, "167c6d69-cae2-4281-8f37-984231620ee9"); - public static final IndexVersion V_7_16_3 = registerIndexVersion(7_16_03_99, Version.LUCENE_8_10_1, "5d25a795-2be6-4663-93dc-10d88efb7e3d"); public static final IndexVersion V_7_17_0 = registerIndexVersion(7_17_00_99, Version.LUCENE_8_11_1, "18766ab8-4691-40a2-94f1-526f3b71420c"); - public static final IndexVersion V_7_17_1 = registerIndexVersion(7_17_01_99, Version.LUCENE_8_11_1, "8ad49144-4a1c-4322-b33d-614a569fba9b"); - public static final IndexVersion V_7_17_2 = registerIndexVersion(7_17_02_99, Version.LUCENE_8_11_1, "50033cde-c905-4923-83d6-8139f3f110e1"); - public static final IndexVersion V_7_17_3 = registerIndexVersion(7_17_03_99, Version.LUCENE_8_11_1, "460b91d1-4f3d-4f56-8dca-8d9e15f5b862"); - public static final IndexVersion V_7_17_4 = registerIndexVersion(7_17_04_99, Version.LUCENE_8_11_1, "26e40d6f-ac7c-43a3-bd0c-1ec6c3093f66"); - public static final IndexVersion V_7_17_5 = registerIndexVersion(7_17_05_99, Version.LUCENE_8_11_1, "d80bc13c-7139-4ff9-979d-42701d480e33"); - public static final IndexVersion V_7_17_6 = registerIndexVersion(7_17_06_99, Version.LUCENE_8_11_1, "0b47328e-341a-4f97-927d-c49f5050778d"); - public static final IndexVersion V_7_17_7 = registerIndexVersion(7_17_07_99, Version.LUCENE_8_11_1, 
"b672ff6b-8078-4f6e-b426-6fcf7f8687b4"); - public static final IndexVersion V_7_17_8 = registerIndexVersion(7_17_08_99, Version.LUCENE_8_11_1, "0faffa1b-5fb3-4439-9367-f154fb25395f"); - public static final IndexVersion V_7_17_9 = registerIndexVersion(7_17_09_99, Version.LUCENE_8_11_1, "8044989f-77ef-4d6d-9dd8-1bdd805cef74"); - public static final IndexVersion V_7_17_10 = registerIndexVersion(7_17_10_99, Version.LUCENE_8_11_1, "66b743fb-8be6-443f-8920-d8c5ed561857"); - public static final IndexVersion V_7_17_11 = registerIndexVersion(7_17_11_99, Version.LUCENE_8_11_1, "f1935acc-1af9-44b0-97e9-67112d333753"); - public static final IndexVersion V_7_17_12 = registerIndexVersion(7_17_12_99, Version.LUCENE_8_11_1, "1a0719f2-96f4-4df5-b20d-62244e27d7d4"); - public static final IndexVersion V_7_17_13 = registerIndexVersion(7_17_13_99, Version.LUCENE_8_11_1, "171a73c6-3ece-4f10-804f-7104a97b557c"); public static final IndexVersion V_8_0_0 = registerIndexVersion(8_00_00_99, Version.LUCENE_9_0_0, "ff18a13c-1fa7-4cf7-a3b1-4fdcd9461d5b"); - public static final IndexVersion V_8_0_1 = registerIndexVersion(8_00_01_99, Version.LUCENE_9_0_0, "4bd5650f-3eff-418f-a7a6-ad46b2a9c941"); public static final IndexVersion V_8_1_0 = registerIndexVersion(8_01_00_99, Version.LUCENE_9_0_0, "b4742461-ee43-4fd0-a260-29f8388b82ec"); - public static final IndexVersion V_8_1_1 = registerIndexVersion(8_01_01_99, Version.LUCENE_9_0_0, "3883e088-9a1c-4494-8d71-768820485f33"); - public static final IndexVersion V_8_1_2 = registerIndexVersion(8_01_02_99, Version.LUCENE_9_0_0, "859a745a-81d3-463e-af58-615179a22d4f"); - public static final IndexVersion V_8_1_3 = registerIndexVersion(8_01_03_99, Version.LUCENE_9_0_0, "27a49f3f-d3ac-4b0e-8bba-1be24daf4a56"); public static final IndexVersion V_8_2_0 = registerIndexVersion(8_02_00_99, Version.LUCENE_9_1_0, "af0ed990-2f32-42b5-aaf3-59d21a3dca7a"); - public static final IndexVersion V_8_2_1 = registerIndexVersion(8_02_01_99, Version.LUCENE_9_1_0, "6e2a3812-062a-4d08-8c35-ddc930e8e246"); - public static final IndexVersion V_8_2_2 = registerIndexVersion(8_02_02_99, Version.LUCENE_9_1_0, "93d1434c-3956-408b-8204-93be8ab78856"); - public static final IndexVersion V_8_2_3 = registerIndexVersion(8_02_03_99, Version.LUCENE_9_1_0, "026f6904-2a04-4476-8097-02a75e37e0f7"); public static final IndexVersion V_8_3_0 = registerIndexVersion(8_03_00_99, Version.LUCENE_9_2_0, "eca8e8a3-0724-4247-a58d-e4eafcec4b3f"); - public static final IndexVersion V_8_3_1 = registerIndexVersion(8_03_01_99, Version.LUCENE_9_2_0, "dac08798-c0b5-46c9-bf27-d82c617ce41f"); - public static final IndexVersion V_8_3_2 = registerIndexVersion(8_03_02_99, Version.LUCENE_9_2_0, "2a0c5fb9-e8a5-4788-89f8-f5723bd68cee"); - public static final IndexVersion V_8_3_3 = registerIndexVersion(8_03_03_99, Version.LUCENE_9_2_0, "440a5f5c-767a-49f7-8593-dc7627b30397"); public static final IndexVersion V_8_4_0 = registerIndexVersion(8_04_00_99, Version.LUCENE_9_3_0, "d27324da-b36c-452a-93a8-9b69a6c302a1"); - public static final IndexVersion V_8_4_1 = registerIndexVersion(8_04_01_99, Version.LUCENE_9_3_0, "44108ecd-839b-423e-9ef1-9d457f244fff"); - public static final IndexVersion V_8_4_2 = registerIndexVersion(8_04_02_99, Version.LUCENE_9_3_0, "9c20ed39-8c32-4cf0-9f06-42735cbf604e"); - public static final IndexVersion V_8_4_3 = registerIndexVersion(8_04_03_99, Version.LUCENE_9_3_0, "e7d17607-47c0-4662-b308-beeb9a8ec552"); public static final IndexVersion V_8_5_0 = registerIndexVersion(8_05_00_99, Version.LUCENE_9_4_1, 
"c5284b51-7fee-4f34-a837-241bb57a7aa6"); - public static final IndexVersion V_8_5_1 = registerIndexVersion(8_05_01_99, Version.LUCENE_9_4_1, "b23a983c-9630-4a2b-8352-0f52b55ff87e"); - public static final IndexVersion V_8_5_2 = registerIndexVersion(8_05_02_99, Version.LUCENE_9_4_1, "cfc80b6f-cb5c-4a4c-b3af-5fa1000508a8"); - public static final IndexVersion V_8_5_3 = registerIndexVersion(8_05_03_99, Version.LUCENE_9_4_2, "f8ac8061-1b17-4cab-b2f6-94df31f7552e"); public static final IndexVersion V_8_6_0 = registerIndexVersion(8_06_00_99, Version.LUCENE_9_4_2, "5e78c76c-74aa-464e-9383-89bdffb74db9"); - public static final IndexVersion V_8_6_1 = registerIndexVersion(8_06_01_99, Version.LUCENE_9_4_2, "8dc502be-ef27-43b3-a27b-1cb925cbef7d"); - public static final IndexVersion V_8_6_2 = registerIndexVersion(8_06_02_99, Version.LUCENE_9_4_2, "e1e73b88-d188-4d82-b5e1-dee261418783"); public static final IndexVersion V_8_7_0 = registerIndexVersion(8_07_00_99, Version.LUCENE_9_5_0, "f9227941-d6f4-462b-957f-2bcd36c28382"); - public static final IndexVersion V_8_7_1 = registerIndexVersion(8_07_01_99, Version.LUCENE_9_5_0, "758780b8-4b0c-44c6-af5d-fdac10b6093a"); public static final IndexVersion V_8_8_0 = registerIndexVersion(8_08_00_99, Version.LUCENE_9_6_0, "d6ffc8d7-f6bd-469b-8495-01688c310000"); - public static final IndexVersion V_8_8_1 = registerIndexVersion(8_08_01_99, Version.LUCENE_9_6_0, "a613499e-ec1a-4b0b-81d3-a766aff3c27c"); public static final IndexVersion V_8_8_2 = registerIndexVersion(8_08_02_99, Version.LUCENE_9_6_0, "9db9d888-6be8-4a58-825c-f423fd8c6b00"); public static final IndexVersion V_8_9_0 = registerIndexVersion(8_09_00_99, Version.LUCENE_9_7_0, "32f6dbab-cc24-4f5b-87b5-015a848480d9"); public static final IndexVersion V_8_9_1 = registerIndexVersion(8_09_01_99, Version.LUCENE_9_7_0, "955a80ac-f70c-40a5-9399-1d8a1e5d342d"); - public static final IndexVersion V_8_9_2 = registerIndexVersion(8_09_02_99, Version.LUCENE_9_7_0, "14c7d64c-9e25-4265-b4fa-e0c5aca67f14"); public static final IndexVersion V_8_10_0 = registerIndexVersion(8_10_00_99, Version.LUCENE_9_7_0, "2e107286-12ad-4c51-9a6f-f8943663b6e7"); public static final IndexVersion V_8_11_0 = registerIndexVersion(8_11_00_99, Version.LUCENE_9_7_0, "f08382c0-06ab-41f4-a56a-cf5397275627"); + /* - * READ THE JAVADOC ABOVE BEFORE ADDING NEW INDEX VERSIONS + * READ THE COMMENT BELOW THIS BLOCK OF DECLARATIONS BEFORE ADDING NEW INDEX VERSIONS * Detached index versions added below here. */ + public static final IndexVersion V_8_500_000 = registerIndexVersion(8_500_000, Version.LUCENE_9_7_0, "bf656f5e-5808-4eee-bf8a-e2bf6736ff55"); + public static final IndexVersion V_8_500_001 = registerIndexVersion(8_500_001, Version.LUCENE_9_7_0, "45045a5a-fc57-4462-89f6-6bc04cda6015"); + /* + * STOP! READ THIS FIRST! No, really, + * ____ _____ ___ ____ _ ____ _____ _ ____ _____ _ _ ___ ____ _____ ___ ____ ____ _____ _ + * / ___|_ _/ _ \| _ \| | | _ \| ____| / \ | _ \ |_ _| | | |_ _/ ___| | ___|_ _| _ \/ ___|_ _| | + * \___ \ | || | | | |_) | | | |_) | _| / _ \ | | | | | | | |_| || |\___ \ | |_ | || |_) \___ \ | | | | + * ___) || || |_| | __/|_| | _ <| |___ / ___ \| |_| | | | | _ || | ___) | | _| | || _ < ___) || | |_| + * |____/ |_| \___/|_| (_) |_| \_\_____/_/ \_\____/ |_| |_| |_|___|____/ |_| |___|_| \_\____/ |_| (_) + * + * A new index version should be added EVERY TIME a change is made to index metadata or data storage. 
+ * Each index version should only be used in a single merged commit (apart from the BwC versions copied from o.e.Version, ≤V_8_11_0). + * + * To add a new index version, add a new constant at the bottom of the list, above this comment, which is one greater than the + * current highest version, ensure it has a fresh UUID, and update CurrentHolder#CURRENT to point to the new version. Don't add other + * lines, comments, etc. + * + * REVERTING AN INDEX VERSION + * + * If you revert a commit with an index version change, you MUST ensure there is a NEW index version representing the reverted + * change. DO NOT let the index version go backwards, it must ALWAYS be incremented. + * + * DETERMINING INDEX VERSIONS FROM GIT HISTORY + * + * TODO after the release of v8.11.0, copy the instructions about using git to track the history of versions from TransportVersion.java + * (the example commands won't make sense until at least 8.11.0 is released) + */ + private static class CurrentHolder { - private static final IndexVersion CURRENT = findCurrent(V_8_11_0); + private static final IndexVersion CURRENT = findCurrent(V_8_500_001); // finds the pluggable current version, or uses the given fallback private static IndexVersion findCurrent(IndexVersion fallback) { diff --git a/server/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java b/server/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java index 69419c21e46e3..c82f59d05945a 100644 --- a/server/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java +++ b/server/src/main/java/org/elasticsearch/index/cache/query/QueryCacheStats.java @@ -57,6 +57,10 @@ public void add(QueryCacheStats stats) { cacheSize += stats.cacheSize; } + public void addRamBytesUsed(long additionalRamBytesUsed) { + ramBytesUsed += additionalRamBytesUsed; + } + public long getMemorySizeInBytes() { return ramBytesUsed; } diff --git a/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java b/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java index 1a44f1f46c33e..16b58e001dcbe 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java +++ b/server/src/main/java/org/elasticsearch/index/engine/CommitStats.java @@ -83,7 +83,7 @@ public int getNumDocs() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(userData, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(userData, StreamOutput::writeString); out.writeLong(generation); out.writeOptionalString(id); out.writeInt(numDocs); diff --git a/server/src/main/java/org/elasticsearch/index/engine/Engine.java b/server/src/main/java/org/elasticsearch/index/engine/Engine.java index 4c58c075bf58a..7f896c352d958 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Engine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Engine.java @@ -283,10 +283,9 @@ public void verifyEngineBeforeIndexClosing() throws IllegalStateException { public interface IndexCommitListener { /** - * This method is invoked each time a new Lucene commit is created through this engine. There is no guarantee that a listener will - be notified of the commits in order, ie newer commits may appear before older ones. The {@link IndexCommitRef} prevents the - {@link IndexCommitRef} files to be deleted from disk until the reference is closed. As such, the listener must close the - reference as soon as it is done with it.
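The replacement javadoc that follows strengthens the old wording: commit notifications are now promised in order, and InternalEngine enforces that promise (when assertions are enabled) with the assertingCommitsOrderListener wrapper a little further below, which tracks the last seen commit generation. A minimal standalone sketch of that check, with invented names:

    import java.util.concurrent.atomic.AtomicLong;

    // Each new commit generation must be strictly greater than the previous one.
    // Run with java -ea so the assert actually fires.
    public class CommitOrderSketch {
        private final AtomicLong lastGeneration = new AtomicLong(0L);

        void onNewCommit(long generation) {
            long previous = lastGeneration.getAndSet(generation);
            assert previous < generation
                : "expected new commit generation " + generation + " to be greater than " + previous;
        }

        public static void main(String[] args) {
            CommitOrderSketch check = new CommitOrderSketch();
            check.onNewCommit(1); // ok
            check.onNewCommit(2); // ok: generations only move forward
        }
    }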
+ * This method is invoked each time a new Lucene commit is created through this engine. Note that commits are notified in order. The + {@link IndexCommitRef} prevents the {@link IndexCommitRef} files from being deleted from disk until the reference is closed. As such, + the listener must close the reference as soon as it is done with it. * * @param shardId the {@link ShardId} of shard * @param store the index shard store @@ -1127,7 +1126,7 @@ public void externalRefresh(String source, ActionListener * Called when our engine is using too much heap and should move buffered indexed/deleted documents to disk. */ // NOTE: do NOT rename this to something containing flush or refresh! - public abstract void writeIndexingBuffer() throws EngineException; + public abstract void writeIndexingBuffer() throws IOException; /** * Checks if this engine should be flushed periodically. diff --git a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java index a2924e7a295fb..b296de8739c90 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/InternalEngine.java @@ -88,6 +88,7 @@ import org.elasticsearch.index.seqno.SequenceNumbers; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.index.shard.ShardLongFieldRange; +import org.elasticsearch.index.store.Store; import org.elasticsearch.index.translog.Translog; import org.elasticsearch.index.translog.TranslogConfig; import org.elasticsearch.index.translog.TranslogCorruptedException; @@ -125,6 +126,7 @@ import static org.elasticsearch.core.Strings.format; public class InternalEngine extends Engine { + /** * When we last pruned expired tombstones from versionMap.deletes: */ @@ -360,26 +362,57 @@ private SoftDeletesPolicy newSoftDeletesPolicy() throws IOException { @Nullable private CombinedDeletionPolicy.CommitsListener newCommitsListener() { - final Engine.IndexCommitListener listener = engineConfig.getIndexCommitListener(); + Engine.IndexCommitListener listener = engineConfig.getIndexCommitListener(); if (listener != null) { + final IndexCommitListener wrappedListener = Assertions.ENABLED ?
assertingCommitsOrderListener(listener) : listener; return new CombinedDeletionPolicy.CommitsListener() { @Override public void onNewAcquiredCommit(final IndexCommit commit, final Set additionalFiles) { final IndexCommitRef indexCommitRef = acquireIndexCommitRef(() -> commit); var primaryTerm = config().getPrimaryTermSupplier().getAsLong(); assert indexCommitRef.getIndexCommit() == commit; - listener.onNewCommit(shardId, store, primaryTerm, indexCommitRef, additionalFiles); + wrappedListener.onNewCommit(shardId, store, primaryTerm, indexCommitRef, additionalFiles); } @Override public void onDeletedCommit(IndexCommit commit) { - listener.onIndexCommitDelete(shardId, commit); + wrappedListener.onIndexCommitDelete(shardId, commit); } }; } return null; } + private IndexCommitListener assertingCommitsOrderListener(final IndexCommitListener listener) { + final AtomicLong generation = new AtomicLong(0L); + return new IndexCommitListener() { + @Override + public void onNewCommit( + ShardId shardId, + Store store, + long primaryTerm, + IndexCommitRef indexCommitRef, + Set additionalFiles + ) { + final long nextGen = indexCommitRef.getIndexCommit().getGeneration(); + final long prevGen = generation.getAndSet(nextGen); + assert prevGen < nextGen + : "Expect new commit generation " + + nextGen + + " to be greater than previous commit generation " + + prevGen + + " for shard " + + shardId; + listener.onNewCommit(shardId, store, primaryTerm, indexCommitRef, additionalFiles); + } + + @Override + public void onIndexCommitDelete(ShardId shardId, IndexCommit deletedCommit) { + listener.onIndexCommitDelete(shardId, deletedCommit); + } + }; + } + @Override public CompletionStats completionStats(String... fieldNamePatterns) { return completionStatsCache.get(fieldNamePatterns); @@ -731,8 +764,8 @@ private ExternalReaderManager createReaderManager(RefreshWarmerListener external DirectoryReader.open(indexWriter), shardId ); - internalReaderManager = new ElasticsearchReaderManager(directoryReader); lastCommittedSegmentInfos = store.readLastCommittedSegmentsInfo(); + internalReaderManager = createInternalReaderManager(directoryReader); ExternalReaderManager externalReaderManager = new ExternalReaderManager(internalReaderManager, externalRefreshListener); success = true; return externalReaderManager; @@ -752,6 +785,10 @@ private ExternalReaderManager createReaderManager(RefreshWarmerListener external } } + protected ElasticsearchReaderManager createInternalReaderManager(ElasticsearchDirectoryReader directoryReader) { + return new ElasticsearchReaderManager(directoryReader); + } + public final AtomicLong translogGetCount = new AtomicLong(); // number of times realtime get was done on translog public final AtomicLong translogInMemorySegmentsCount = new AtomicLong(); // number of times in-memory index needed to be created @@ -2031,9 +2068,28 @@ protected final RefreshResult refresh(String source, SearcherScope scope, boolea } @Override - public void writeIndexingBuffer() throws EngineException { + public void writeIndexingBuffer() throws IOException { + final long versionMapBytesUsed = versionMap.ramBytesUsedForRefresh(); + // Only count bytes that are not already being written to disk. Note: this number may be negative at times if these two metrics get + // updated concurrently. It's fine as it's only being used as a heuristic to decide on a full refresh vs. writing a single segment. + // TODO: it might be more relevant to use the RAM usage of the largest DWPT as opposed to the overall RAM usage? 
Can we get this + // exposed in Lucene? + final long indexWriterBytesUsed = indexWriter.ramBytesUsed() - indexWriter.getFlushingBytes(); + + if (versionMapBytesUsed >= indexWriterBytesUsed) { + // This method expects to reclaim memory quickly, so if the version map is using more memory than the IndexWriter buffer then we + // do a refresh, which is the only way to reclaim memory from the version map. IndexWriter#flushNextBuffer has similar logic: if + // pending deletes occupy more than half of RAMBufferSizeMB then deletes are applied too. + reclaimVersionMapMemory(); + } else { + // Write the largest pending segment. + indexWriter.flushNextBuffer(); + } + } + + private void reclaimVersionMapMemory() { // If we're already halfway through the flush thresholds, then we do a flush. This will save us from writing segments twice - // independently in a short period of time, once to reclaim IndexWriter buffer memory and then to reclaim the translog. For + // independently in a short period of time, once to reclaim version map memory and then to reclaim the translog. For // memory-constrained deployments that need to refresh often to reclaim memory, this may require flushing 2x more often than // expected, but the general assumption is that this downside is an ok trade-off given the benefit of flushing the whole content of // the indexing buffer less often. @@ -2044,12 +2100,9 @@ public void writeIndexingBuffer() throws EngineException { final long flushThresholdAgeInNanos = config().getIndexSettings().getFlushThresholdAge().getNanos() / 2; if (shouldPeriodicallyFlush(flushThresholdSizeInBytes, flushThresholdAgeInNanos)) { flush(false, false, ActionListener.noop()); - return; + } else { + refresh("write indexing buffer", SearcherScope.INTERNAL, false); } - - // TODO: revise https://github.com/elastic/elasticsearch/pull/34553 to use IndexWriter.flushNextBuffer to flush only the largest - // pending DWPT. Note that benchmarking this PR with a heavy update user case (geonames) and a small heap (1GB) caused OOM. - refresh("write indexing buffer", SearcherScope.INTERNAL, false); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index 4fed853d945c9..86ab5b8edebe6 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -201,6 +201,10 @@ void remove(BytesRef uid, DeleteVersionValue deleted) { long getMinDeleteTimestamp() { return Math.min(current.minDeleteTimestamp.get(), old.minDeleteTimestamp.get()); } + + long ramBytesUsed() { + return current.ramBytesUsed.get() + old.ramBytesUsed.get(); + } } // All deletes also go here, and delete "tombstones" are retained after refresh: @@ -448,20 +452,20 @@ synchronized void clear() { @Override public long ramBytesUsed() { - return maps.current.ramBytesUsed.get() + ramBytesUsedTombstones.get(); + return maps.ramBytesUsed() + ramBytesUsedTombstones.get(); } /** - * Returns how much RAM would be freed up by refreshing. This is {@link #ramBytesUsed} except does not include tombstones because they - * don't clear on refresh. + * Returns how much RAM would be freed up by refreshing. This is the RAM usage of the current version map. It doesn't include tombstones + * since they don't get cleared on refresh, nor the old version map that is being reclaimed. 
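The reworded javadocs here and just below describe a three-way split of version-map memory: the current map (freed by the next refresh), the old map (being freed by an in-flight refresh), and tombstones (which survive refreshes entirely). A hedged sketch of that accounting, mirroring the description rather than the real LiveVersionMap internals:

    import java.util.concurrent.atomic.AtomicLong;

    // Field and method names mirror the javadocs above; this is not the real class.
    public class VersionMapAccountingSketch {
        private final AtomicLong currentBytes = new AtomicLong();   // reclaimable by the next refresh
        private final AtomicLong oldBytes = new AtomicLong();       // being reclaimed by an in-flight refresh
        private final AtomicLong tombstoneBytes = new AtomicLong(); // survives refreshes

        long ramBytesUsed() {           // total heap attributed to the map
            return currentBytes.get() + oldBytes.get() + tombstoneBytes.get();
        }

        long ramBytesUsedForRefresh() { // what writeIndexingBuffer weighs against the IndexWriter buffer
            return currentBytes.get();
        }

        long getRefreshingBytes() {     // freed once the in-flight refresh completes
            return oldBytes.get();
        }
    }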
@Override diff --git a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java index 4fed853d945c9..86ab5b8edebe6 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java +++ b/server/src/main/java/org/elasticsearch/index/engine/LiveVersionMap.java @@ -201,6 +201,10 @@ void remove(BytesRef uid, DeleteVersionValue deleted) { long getMinDeleteTimestamp() { return Math.min(current.minDeleteTimestamp.get(), old.minDeleteTimestamp.get()); } + + long ramBytesUsed() { + return current.ramBytesUsed.get() + old.ramBytesUsed.get(); + } } // All deletes also go here, and delete "tombstones" are retained after refresh: @@ -448,20 +452,20 @@ synchronized void clear() { @Override public long ramBytesUsed() { - return maps.current.ramBytesUsed.get() + ramBytesUsedTombstones.get(); + return maps.ramBytesUsed() + ramBytesUsedTombstones.get(); } /** - * Returns how much RAM would be freed up by refreshing. This is {@link #ramBytesUsed} except does not include tombstones because they - * don't clear on refresh. + * Returns how much RAM would be freed up by refreshing. This is the RAM usage of the current version map. It doesn't include tombstones + * since they don't get cleared on refresh, nor the old version map that is being reclaimed. */ long ramBytesUsedForRefresh() { return maps.current.ramBytesUsed.get(); } /** - * Returns how much RAM is current being freed up by refreshing. This is {@link #ramBytesUsed()} - * except does not include tombstones because they don't clear on refresh. + * Returns how much RAM is currently being freed up by refreshing. This is the RAM usage of the previous version map that needs to stay + * around until operations are safely recorded in the Lucene index. */ long getRefreshingBytes() { return maps.old.ramBytesUsed.get(); diff --git a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java index b38cc24e107ea..19345083bbc7b 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java +++ b/server/src/main/java/org/elasticsearch/index/engine/ReadOnlyEngine.java @@ -440,7 +440,7 @@ public void maybeRefresh(String source, ActionListener listener) } @Override - public void writeIndexingBuffer() throws EngineException {} + public void writeIndexingBuffer() {} @Override public boolean shouldPeriodicallyFlush() { diff --git a/server/src/main/java/org/elasticsearch/index/engine/Segment.java b/server/src/main/java/org/elasticsearch/index/engine/Segment.java index acfb1ac8920a1..9098aaecfeea6 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/Segment.java +++ b/server/src/main/java/org/elasticsearch/index/engine/Segment.java @@ -14,7 +14,7 @@ import org.apache.lucene.search.SortedNumericSortField; import org.apache.lucene.search.SortedSetSelector; import org.apache.lucene.search.SortedSetSortField; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -52,7 +52,7 @@ public Segment(StreamInput in) throws IOException { version = Lucene.parseVersionLenient(in.readOptionalString(), null); compound = in.readOptionalBoolean(); mergeId = in.readOptionalString(); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { in.readLong(); // memoryInBytes } if (in.readBoolean()) { @@ -159,7 +159,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(version.toString()); out.writeOptionalBoolean(compound); out.writeOptionalString(mergeId); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeLong(0); // memoryInBytes } @@ -168,7 +168,7 @@ public void writeTo(StreamOutput out) throws IOException { boolean hasAttributes = attributes != null; out.writeBoolean(hasAttributes); if (hasAttributes) { - out.writeMap(attributes, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(attributes, StreamOutput::writeString); } } @@ -252,7 +252,7 @@ private static void writeSegmentSort(StreamOutput out, Sort sort) throws IOExcep o.writeBoolean(((SortedNumericSortField) field).getSelector() == SortedNumericSelector.Type.MAX); o.writeBoolean(field.getReverse()); } else if (field.getType().equals(SortField.Type.STRING)) { - if (o.getTransportVersion().before(TransportVersion.V_8_5_0)) { + if (o.getTransportVersion().before(TransportVersions.V_8_5_0)) { // The closest supported version before 8.5.0 was SortedSet fields, so we mimic that o.writeByte(SORT_STRING_SET);
o.writeOptionalBoolean(field.getMissingValue() == null ? null : field.getMissingValue() == SortField.STRING_FIRST); diff --git a/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java b/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java index 5a2edcb059787..1b8069c726ab3 100644 --- a/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java +++ b/server/src/main/java/org/elasticsearch/index/engine/SegmentsStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.index.engine; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -39,7 +39,7 @@ public SegmentsStats() { public SegmentsStats(StreamInput in) throws IOException { count = in.readVLong(); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { in.readLong(); // memoryInBytes in.readLong(); // termsMemoryInBytes in.readLong(); // storedFieldsMemoryInBytes @@ -220,7 +220,7 @@ static final class Fields { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(count); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeLong(0L); // memoryInBytes out.writeLong(0L); // termsMemoryInBytes out.writeLong(0L); // storedFieldsMemoryInBytes @@ -250,7 +250,7 @@ public static class FileStats implements Writeable, ToXContentFragment { private final long max; FileStats(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_13_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_13_0)) { this.ext = in.readString(); this.total = in.readVLong(); this.count = in.readVLong(); @@ -295,7 +295,7 @@ public long getMax() { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_13_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_13_0)) { out.writeString(ext); out.writeVLong(total); out.writeVLong(count); diff --git a/server/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java b/server/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java index 07b92f97aa8ae..c5c2035bdd39e 100644 --- a/server/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java +++ b/server/src/main/java/org/elasticsearch/index/fielddata/FieldDataStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.index.fielddata; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.FieldMemoryStats; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -47,7 +47,7 @@ public FieldDataStats(StreamInput in) throws IOException { memorySize = in.readVLong(); evictions = in.readVLong(); fields = in.readOptionalWriteable(FieldMemoryStats::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { long buildTimeMillis = in.readVLong(); Map fieldGlobalOrdinalsStats = null; if (in.readBoolean()) { @@ -110,11 +110,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(memorySize); out.writeVLong(evictions); out.writeOptionalWriteable(fields); - if 
(out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeVLong(globalOrdinalsStats.buildTimeMillis); if (globalOrdinalsStats.fieldGlobalOrdinalsStats != null) { out.writeBoolean(true); - out.writeMap(globalOrdinalsStats.fieldGlobalOrdinalsStats, StreamOutput::writeString, (out1, value) -> { + out.writeMap(globalOrdinalsStats.fieldGlobalOrdinalsStats, (out1, value) -> { out1.writeVLong(value.totalBuildingTime); out1.writeVLong(value.valueCount); }); diff --git a/server/src/main/java/org/elasticsearch/index/get/GetResult.java b/server/src/main/java/org/elasticsearch/index/get/GetResult.java index efedb141d2460..7d542d1e35275 100644 --- a/server/src/main/java/org/elasticsearch/index/get/GetResult.java +++ b/server/src/main/java/org/elasticsearch/index/get/GetResult.java @@ -9,7 +9,7 @@ package org.elasticsearch.index.get; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressorFactory; @@ -66,7 +66,7 @@ public class GetResult implements Writeable, Iterable, ToXContent public GetResult(StreamInput in) throws IOException { index = in.readString(); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { in.readOptionalString(); } id = in.readString(); @@ -386,7 +386,7 @@ public static GetResult fromXContent(XContentParser parser) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id);
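The TransportVersion-to-TransportVersions moves in these serialization classes are mechanical, but the wire-compatibility pattern they preserve is worth spelling out. A self-contained sketch (ExampleStats is hypothetical, not a class in this change):

    import java.io.IOException;
    import org.elasticsearch.TransportVersions;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    class ExampleStats {
        private final long count;

        ExampleStats(StreamInput in) throws IOException {
            count = in.readVLong();
            if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) {
                in.readLong(); // field dropped in 8.0, but still sent by older nodes
            }
        }

        void writeTo(StreamOutput out) throws IOException {
            out.writeVLong(count);
            if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) {
                out.writeLong(0L); // placeholder so pre-8.0 nodes can still decode the stream
            }
        }
    }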
diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java index 0fbdc345c8da2..e4fb3cbfc8809 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParser.java @@ -17,6 +17,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldDataCache; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.plugins.internal.DocumentParsingObserver; @@ -40,11 +41,16 @@ import java.util.function.Consumer; import java.util.function.Supplier; +import static org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.MAX_DIMS_COUNT; +import static org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.MIN_DIMS_FOR_DYNAMIC_FLOAT_MAPPING; + /** * A parser for documents */ public final class DocumentParser { + public static final IndexVersion DYNAMICALLY_MAP_DENSE_VECTORS_INDEX_VERSION = IndexVersion.V_8_11_0; + private final XContentParserConfiguration parserConfiguration; private final Supplier<DocumentParsingObserver> documentParsingObserverSupplier; private final MappingParserContext mappingParserContext; @@ -244,9 +250,8 @@ static Mapping createDynamicUpdate(DocumentParserContext context) { return null; } RootObjectMapper.Builder rootBuilder = context.updateRoot(); - for (Mapper mapper : context.getDynamicMappers()) { - rootBuilder.addDynamic(mapper.name(), null, mapper, context); - } + context.getDynamicMappers().forEach(mapper -> rootBuilder.addDynamic(mapper.name(), null, mapper, context)); + for (RuntimeField runtimeField : context.getDynamicRuntimeFields()) { rootBuilder.addRuntimeField(runtimeField); } @@ -588,6 +593,33 @@ private static void parseNonDynamicArray(DocumentParserContext context, final St parseValue(context, lastFieldName); } } + postProcessDynamicArrayMapping(context, lastFieldName); + } + + /** + * Arrays that have been classified as floats and meet specific criteria are re-mapped to dense_vector. + */ + private static void postProcessDynamicArrayMapping(DocumentParserContext context, String fieldName) { + if (context.indexSettings().getIndexVersionCreated().onOrAfter(DYNAMICALLY_MAP_DENSE_VECTORS_INDEX_VERSION)) { + final MapperBuilderContext builderContext = context.createDynamicMapperBuilderContext(); + final String fullFieldName = builderContext.buildFullName(fieldName); + final List<Mapper> mappers = context.getDynamicMappers(fullFieldName); + if (mappers == null + || context.isFieldAppliedFromTemplate(fullFieldName) + || context.isCopyToField(fullFieldName) + || mappers.size() < MIN_DIMS_FOR_DYNAMIC_FLOAT_MAPPING + || mappers.size() > MAX_DIMS_COUNT + || mappers.stream().allMatch(m -> m instanceof NumberFieldMapper && "float".equals(m.typeName())) == false) { + return; + } + + DenseVectorFieldMapper.Builder builder = new DenseVectorFieldMapper.Builder( + fieldName, + context.indexSettings().getIndexVersionCreated() + ); + DenseVectorFieldMapper denseVectorFieldMapper = builder.build(builderContext); + context.updateDynamicMappers(fullFieldName, List.of(denseVectorFieldMapper)); + } } private static void throwEOFOnParseArray(String arrayFieldName, DocumentParserContext context) { @@ -677,6 +709,7 @@ private static void parseCopyFields(DocumentParserContext context, List<String> assert targetDoc != null; final DocumentParserContext copyToContext = context.createCopyToContext(field, targetDoc); innerParseObject(copyToContext); + context.markFieldAsCopyTo(field); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java index 933f97d46dc08..15265ea821e5c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DocumentParserContext.java @@ -84,7 +84,7 @@ protected void addDoc(LuceneDocument doc) { private final MappingParserContext mappingParserContext; private final SourceToParse sourceToParse; private final Set<String> ignoredFields; - private final List<Mapper> dynamicMappers; + private final Map<String, List<Mapper>> dynamicMappers; private final Set<String> newFieldsSeen; private final Map<String, ObjectMapper> dynamicObjectMappers; private final List<RuntimeField> dynamicRuntimeFields; @@ -94,13 +94,15 @@ protected void addDoc(LuceneDocument doc) { private String id; private Field version; private SeqNoFieldMapper.SequenceIDFields seqID; + private final Set<String> fieldsAppliedFromTemplates; + private final Set<String> copyToFields; private DocumentParserContext( MappingLookup mappingLookup, MappingParserContext mappingParserContext, SourceToParse sourceToParse, Set<String> ignoreFields, - List<Mapper> dynamicMappers, + Map<String, List<Mapper>> dynamicMappers, Set<String> newFieldsSeen, Map<String, ObjectMapper> dynamicObjectMappers, List<RuntimeField> dynamicRuntimeFields, @@ -109,7 +111,9 @@ private DocumentParserContext(
SeqNoFieldMapper.SequenceIDFields seqID, DocumentDimensions dimensions, ObjectMapper parent, - ObjectMapper.Dynamic dynamic + ObjectMapper.Dynamic dynamic, + Set<String> fieldsAppliedFromTemplates, + Set<String> copyToFields ) { this.mappingLookup = mappingLookup; this.mappingParserContext = mappingParserContext; @@ -125,6 +129,8 @@ private DocumentParserContext( this.dimensions = dimensions; this.parent = parent; this.dynamic = dynamic; + this.fieldsAppliedFromTemplates = fieldsAppliedFromTemplates; + this.copyToFields = copyToFields; } private DocumentParserContext(ObjectMapper parent, ObjectMapper.Dynamic dynamic, DocumentParserContext in) { @@ -142,7 +148,9 @@ private DocumentParserContext(ObjectMapper parent, ObjectMapper.Dynamic dynamic, in.seqID, in.dimensions, parent, - dynamic + dynamic, + in.fieldsAppliedFromTemplates, + in.copyToFields ); } @@ -158,7 +166,7 @@ protected DocumentParserContext( mappingParserContext, source, new HashSet<>(), - new ArrayList<>(), + new HashMap<>(), new HashSet<>(), new HashMap<>(), new ArrayList<>(), @@ -167,7 +175,9 @@ protected DocumentParserContext( null, DocumentDimensions.fromIndexSettings(mappingParserContext.getIndexSettings()), parent, - dynamic + dynamic, + new HashSet<>(), + new HashSet<>() ); } @@ -275,6 +285,22 @@ public ObjectMapper.Dynamic dynamic() { return dynamic; } + public void markFieldAsAppliedFromTemplate(String fieldName) { + fieldsAppliedFromTemplates.add(fieldName); + } + + public boolean isFieldAppliedFromTemplate(String name) { + return fieldsAppliedFromTemplates.contains(name); + } + + public void markFieldAsCopyTo(String fieldName) { + copyToFields.add(fieldName); + } + + public boolean isCopyToField(String name) { + return copyToFields.contains(name); + } + /** * Add a new mapper dynamically created while parsing. */ @@ -283,6 +309,7 @@ public final void addDynamicMapper(Mapper mapper) { if (mapper instanceof ObjectMapper) { MappingLookup.checkObjectDepthLimit(indexSettings().getMappingDepthLimit(), mapper.name()); } + // eagerly check field name limit here to avoid OOM errors // only check fields that are not already mapped or tracked in order to avoid hitting field limit too early via double-counting // note that existing fields can also receive dynamic mapping updates (e.g. constant_keyword to fix the value) @@ -302,23 +329,39 @@ public final void addDynamicMapper(Mapper mapper) { addDynamicMapper(submapper); } } + // TODO we may want to stop adding object mappers to the dynamic mappers list: most times they will be mapped when parsing their // sub-fields (see ObjectMapper.Builder#addDynamic), which causes extra work as the two variants of the same object field // will be merged together when creating the final dynamic update. The only cases where object fields need extra treatment are // dynamically mapped objects when the incoming document defines no sub-fields in them: // 1) by default, they would be empty containers in the mappings, is it then important to map them? // 2) they can be the result of applying a dynamic template which may define sub-fields or set dynamic, enabled or subobjects. - dynamicMappers.add(mapper); + dynamicMappers.computeIfAbsent(mapper.name(), k -> new ArrayList<>()).add(mapper); } /** * Get dynamic mappers created as a result of parsing an incoming document. Responsible for exposing all the newly created * fields that need to be merged into the existing mappings. Used to create the required mapping update at the end of document parsing. - * Consists of a flat set of {@link Mapper}s that will need to be added to their respective parent {@link ObjectMapper}s in order + * Consists of all {@link Mapper}s that will need to be added to their respective parent {@link ObjectMapper}s in order * to become part of the resulting dynamic mapping update. */ public final List<Mapper> getDynamicMappers() { - return dynamicMappers; + return dynamicMappers.values().stream().flatMap(List::stream).toList(); + }
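Keying dynamic mappers by full field name is what lets a whole group of same-named mappers be swapped in one step, as postProcessDynamicArrayMapping does for float arrays. The bookkeeping in isolation (a toy sketch over plain JDK types, with String standing in for Mapper):

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class DynamicMappersSketch {
        private final Map<String, List<String>> byField = new HashMap<>();

        void add(String field, String mapper) {
            byField.computeIfAbsent(field, k -> new ArrayList<>()).add(mapper);
        }

        // e.g. replace 128+ dynamically mapped floats with a single dense_vector mapper
        void update(String field, List<String> replacements) {
            byField.remove(field);
            replacements.forEach(m -> add(field, m));
        }

        List<String> flat() { // what getDynamicMappers() exposes
            return byField.values().stream().flatMap(List::stream).toList();
        }
    }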
+ + /** + * Returns the dynamic {@link Mapper}s associated with a given field name: a flat list of mappers that will need to be added to their + * respective parent {@link ObjectMapper}s in order to become part of the resulting dynamic mapping update. + * @param fieldName Full field name with dot-notation. + * @return List of Mappers or null + */ + public final List<Mapper> getDynamicMappers(String fieldName) { + return dynamicMappers.get(fieldName); + } + + public void updateDynamicMappers(String name, List<Mapper> mappers) { + dynamicMappers.remove(name); + mappers.forEach(this::addDynamicMapper); } /** diff --git a/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java b/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java index 4c547cca1f4da..f2d1b8058f115 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/DynamicFieldsBuilder.java @@ -208,7 +208,9 @@ private static void createDynamicField( DateFormatter dateFormatter, CheckedRunnable<IOException> dynamicFieldStrategy ) throws IOException { - if (applyMatchingTemplate(context, name, matchType, dateFormatter) == false) { + if (applyMatchingTemplate(context, name, matchType, dateFormatter)) { + context.markFieldAsAppliedFromTemplate(name); + } else { dynamicFieldStrategy.run(); } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java index 9ee88f2cfe11f..55bc15528404d 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperBuilderContext.java @@ -10,38 +10,25 @@ import org.elasticsearch.common.Strings; -import java.util.Objects; -import java.util.function.BooleanSupplier; - /** * Holds context for building Mapper objects from their Builders */ public class MapperBuilderContext { + private static final MapperBuilderContext ROOT_SYNTHETIC = new MapperBuilderContext(null, true); + private static final MapperBuilderContext ROOT_NOT_SYNTHETIC = new MapperBuilderContext(null, false); + /** * The root context, to be used when building a tree of mappers */ public static MapperBuilderContext root(boolean isSourceSynthetic) { - return new MapperBuilderContext(null, () -> isSourceSynthetic); - } - - /** - * A context to use to build metadata fields. - */ - public static MapperBuilderContext forMetadata() { - return new MapperBuilderContext(null, () -> { - throw new UnsupportedOperationException("metadata fields can't check if _source is synthetic"); - }); + return isSourceSynthetic ?
ROOT_SYNTHETIC : ROOT_NOT_SYNTHETIC; } private final String path; - private final BooleanSupplier isSourceSynthetic; + private final boolean isSourceSynthetic; MapperBuilderContext(String path, boolean isSourceSynthetic) { - this(Objects.requireNonNull(path), () -> isSourceSynthetic); - } - - private MapperBuilderContext(String path, BooleanSupplier isSourceSynthetic) { this.path = path; this.isSourceSynthetic = isSourceSynthetic; } @@ -69,6 +56,7 @@ public String buildFullName(String name) { * Is the {@code _source} field being reconstructed on the fly? */ public boolean isSourceSynthetic() { - return isSourceSynthetic.getAsBoolean(); + return isSourceSynthetic; } + } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java index d19e0bc716a9a..512c35a146d0c 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MapperService.java @@ -564,4 +564,7 @@ public DynamicTemplate[] getAllDynamicTemplates() { return documentMapper().mapping().getRoot().dynamicTemplates(); } + public MapperRegistry getMapperRegistry() { + return mapperRegistry; + } } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java b/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java index f1081743ef82a..a310d2bf0b0ae 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/MappingParser.java @@ -120,8 +120,7 @@ private Mapping parse(String type, Map mapping) throws MapperPar } @SuppressWarnings("unchecked") Map fieldNodeMap = (Map) fieldNode; - MetadataFieldMapper metadataFieldMapper = typeParser.parse(fieldName, fieldNodeMap, mappingParserContext) - .build(MapperBuilderContext.forMetadata()); + MetadataFieldMapper metadataFieldMapper = typeParser.parse(fieldName, fieldNodeMap, mappingParserContext).build(); metadataMappers.put(metadataFieldMapper.getClass(), metadataFieldMapper); assert fieldNodeMap.isEmpty(); if (metadataFieldMapper instanceof SourceFieldMapper sfm) { diff --git a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java index a082c7b5b658f..6d2c5b1cb71ac 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/TextFieldMapper.java @@ -450,25 +450,6 @@ private SubFieldInfo buildPhraseInfo(FieldType fieldType, TextFieldType parent) return new SubFieldInfo(parent.name() + FAST_PHRASE_SUFFIX, phraseFieldType, a); } - public Map indexAnalyzers(String name, SubFieldInfo phraseFieldInfo, SubFieldInfo prefixFieldInfo) { - Map analyzers = new HashMap<>(); - NamedAnalyzer main = this.analyzers.getIndexAnalyzer(); - analyzers.put(name, main); - if (phraseFieldInfo != null) { - analyzers.put( - phraseFieldInfo.field, - new NamedAnalyzer(main.name() + "_phrase", AnalyzerScope.INDEX, phraseFieldInfo.analyzer) - ); - } - if (prefixFieldInfo != null) { - analyzers.put( - prefixFieldInfo.field, - new NamedAnalyzer(main.name() + "_prefix", AnalyzerScope.INDEX, prefixFieldInfo.analyzer) - ); - } - return analyzers; - } - @Override public TextFieldMapper build(MapperBuilderContext context) { MultiFields multiFields = multiFieldsBuilder.build(this, context); @@ -488,17 +469,7 @@ public TextFieldMapper build(MapperBuilderContext context) 
{ throw new MapperParsingException("Cannot use reserved field name [" + mapper.name() + "]"); } } - return new TextFieldMapper( - name, - fieldType, - tft, - indexAnalyzers(tft.name(), phraseFieldInfo, prefixFieldInfo), - prefixFieldInfo, - phraseFieldInfo, - multiFields, - copyTo.build(), - this - ); + return new TextFieldMapper(name, fieldType, tft, prefixFieldInfo, phraseFieldInfo, multiFields, copyTo.build(), this); } } @@ -1151,13 +1122,10 @@ public Query existsQuery(SearchExecutionContext context) { private final SubFieldInfo prefixFieldInfo; private final SubFieldInfo phraseFieldInfo; - private final Map indexAnalyzerMap; - protected TextFieldMapper( String simpleName, FieldType fieldType, TextFieldType mappedFieldType, - Map indexAnalyzers, SubFieldInfo prefixFieldInfo, SubFieldInfo phraseFieldInfo, MultiFields multiFields, @@ -1188,12 +1156,25 @@ protected TextFieldMapper( this.freqFilter = builder.freqFilter.getValue(); this.fieldData = builder.fieldData.get(); this.indexPhrases = builder.indexPhrases.getValue(); - this.indexAnalyzerMap = Map.copyOf(indexAnalyzers); } @Override public Map indexAnalyzers() { - return indexAnalyzerMap; + Map analyzersMap = new HashMap<>(); + analyzersMap.put(name(), indexAnalyzer); + if (phraseFieldInfo != null) { + analyzersMap.put( + phraseFieldInfo.field, + new NamedAnalyzer(indexAnalyzer.name() + "_phrase", AnalyzerScope.INDEX, phraseFieldInfo.analyzer) + ); + } + if (prefixFieldInfo != null) { + analyzersMap.put( + prefixFieldInfo.field, + new NamedAnalyzer(indexAnalyzer.name() + "_prefix", AnalyzerScope.INDEX, prefixFieldInfo.analyzer) + ); + } + return analyzersMap; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java index 7976940e50858..155dac09f0b86 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapper.java @@ -26,6 +26,7 @@ import org.apache.lucene.search.KnnFloatVectorQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.VectorUtil; import org.elasticsearch.common.xcontent.support.XContentMapValues; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.fielddata.FieldDataContext; @@ -34,6 +35,7 @@ import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; +import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MappingLookup; @@ -55,6 +57,7 @@ import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.time.ZoneId; +import java.util.Arrays; import java.util.Locale; import java.util.Map; import java.util.Objects; @@ -68,12 +71,16 @@ * A {@link FieldMapper} for indexing a dense vector of floats. 
*/ public class DenseVectorFieldMapper extends FieldMapper { + private static final float EPS = 1e-4f; public static final IndexVersion MAGNITUDE_STORED_INDEX_VERSION = IndexVersion.V_7_5_0; public static final IndexVersion INDEXED_BY_DEFAULT_INDEX_VERSION = IndexVersion.V_8_11_0; + public static final IndexVersion DOT_PRODUCT_AUTO_NORMALIZED = IndexVersion.V_8_11_0; public static final IndexVersion LITTLE_ENDIAN_FLOAT_STORED_INDEX_VERSION = IndexVersion.V_8_9_0; public static final String CONTENT_TYPE = "dense_vector"; public static short MAX_DIMS_COUNT = 2048; // maximum allowed number of dimensions + + public static short MIN_DIMS_FOR_DYNAMIC_FLOAT_MAPPING = 128; // minimum number of dims for floats to be dynamically mapped to vector public static final int MAGNITUDE_BYTES = 4; private static DenseVectorFieldMapper toType(FieldMapper in) { @@ -89,22 +96,18 @@ public static class Builder extends FieldMapper.Builder { } return elementType; }, m -> toType(m).elementType, XContentBuilder::field, Objects::toString); - private final Parameter dims = new Parameter<>( - "dims", - false, - () -> null, - (n, c, o) -> XContentMapValues.nodeIntegerValue(o), - m -> toType(m).dims, - XContentBuilder::field, - Objects::toString - ).addValidator(dims -> { - if (dims == null) { - throw new MapperParsingException("Missing required parameter [dims] for field [" + name + "]"); + + // This is defined as updatable because it can be updated once, from [null] to a valid dim size, + // by a dynamic mapping update. Once it has been set, however, the value cannot be changed. + private final Parameter dims = new Parameter<>("dims", true, () -> null, (n, c, o) -> { + if (o instanceof Integer == false) { + throw new MapperParsingException("Property [dims] on field [" + n + "] must be an integer but got [" + o + "]"); } - if ((dims > MAX_DIMS_COUNT) || (dims < 1)) { + int dims = XContentMapValues.nodeIntegerValue(o); + if (dims < 1 || dims > MAX_DIMS_COUNT) { throw new MapperParsingException( "The number of dimensions for field [" - + name + + n + "] should be in the range [1, " + MAX_DIMS_COUNT + "] but was [" @@ -112,7 +115,9 @@ public static class Builder extends FieldMapper.Builder { + "]" ); } - }); + return dims; + }, m -> toType(m).dims, XContentBuilder::field, Object::toString).setSerializerCheck((id, ic, v) -> v != null) + .setMergeValidator((previous, current, c) -> previous == null || Objects.equals(previous, current)); private final Parameter similarity; private final Parameter indexOptions = new Parameter<>( "index_options", @@ -320,6 +325,7 @@ public void checkVectorBounds(float[] vector) { @Override void checkVectorMagnitude( + IndexVersion indexVersion, VectorSimilarity similarity, Function appender, float squaredMagnitude @@ -382,7 +388,12 @@ public Field parseKnnVector(DocumentParserContext context, DenseVectorFieldMappe squaredMagnitude += value * value; } fieldMapper.checkDimensionMatches(index, context); - checkVectorMagnitude(fieldMapper.similarity, errorByteElementsAppender(vector), squaredMagnitude); + checkVectorMagnitude( + fieldMapper.indexCreatedVersion, + fieldMapper.similarity, + errorByteElementsAppender(vector), + squaredMagnitude + ); return createKnnVectorField(fieldMapper.fieldType().name(), vector, fieldMapper.similarity.function); } @@ -474,20 +485,31 @@ public void checkVectorBounds(float[] vector) { @Override void checkVectorMagnitude( + IndexVersion indexVersion, VectorSimilarity similarity, Function appender, float squaredMagnitude ) { StringBuilder errorBuilder = null; - if 
(similarity == VectorSimilarity.DOT_PRODUCT && Math.abs(squaredMagnitude - 1.0f) > 1e-4f) { - errorBuilder = new StringBuilder( - "The [" + VectorSimilarity.DOT_PRODUCT + "] similarity can only be used with unit-length vectors." - ); - } else if (similarity == VectorSimilarity.COSINE && Math.sqrt(squaredMagnitude) == 0.0f) { - errorBuilder = new StringBuilder( - "The [" + VectorSimilarity.COSINE + "] similarity does not support vectors with zero magnitude." - ); + if (indexVersion.before(DOT_PRODUCT_AUTO_NORMALIZED)) { + if (similarity == VectorSimilarity.DOT_PRODUCT && Math.abs(squaredMagnitude - 1.0f) > EPS) { + errorBuilder = new StringBuilder( + "The [" + VectorSimilarity.DOT_PRODUCT + "] similarity can only be used with unit-length vectors." + ); + } + if (similarity == VectorSimilarity.COSINE && Math.sqrt(squaredMagnitude) == 0.0f) { + errorBuilder = new StringBuilder( + "The [" + similarity + "] similarity does not support vectors with zero magnitude." + ); + } + } else { + if ((similarity == VectorSimilarity.COSINE || similarity == VectorSimilarity.DOT_PRODUCT) + && Math.sqrt(squaredMagnitude) == 0.0f) { + errorBuilder = new StringBuilder( + "The [" + similarity + "] similarity does not support vectors with zero magnitude." + ); + } } if (errorBuilder != null) { @@ -510,7 +532,15 @@ public Field parseKnnVector(DocumentParserContext context, DenseVectorFieldMappe } fieldMapper.checkDimensionMatches(index, context); checkVectorBounds(vector); - checkVectorMagnitude(fieldMapper.similarity, errorFloatElementsAppender(vector), squaredMagnitude); + checkVectorMagnitude( + fieldMapper.indexCreatedVersion, + fieldMapper.similarity, + errorFloatElementsAppender(vector), + squaredMagnitude + ); + if (fieldMapper.indexCreatedVersion.onOrAfter(DOT_PRODUCT_AUTO_NORMALIZED)) { + fieldMapper.similarity.floatPreprocessing(vector, squaredMagnitude); + } return createKnnVectorField(fieldMapper.fieldType().name(), vector, fieldMapper.similarity.function); } @@ -568,11 +598,20 @@ abstract double parseKnnVectorToByteBuffer(DocumentParserContext context, DenseV public abstract void checkVectorBounds(float[] vector); abstract void checkVectorMagnitude( + IndexVersion indexVersion, VectorSimilarity similarity, Function errorElementsAppender, float squaredMagnitude ); + int parseDimensionCount(DocumentParserContext context) throws IOException { + int index = 0; + for (Token token = context.parser().nextToken(); token != Token.END_ARRAY; token = context.parser().nextToken()) { + index++; + } + return index; + } + void checkNanAndInfinite(float[] vector) { StringBuilder errorBuilder = null; @@ -678,6 +717,21 @@ float score(float similarity, ElementType elementType, int dim) { case FLOAT -> (1 + similarity) / 2f; }; } + + @Override + void floatPreprocessing(float[] vector, float squareSum) { + if (squareSum == 0) { + throw new IllegalArgumentException("Cannot normalize a zero-length vector"); + } + // Vector already has a magnitude of `1` + if (Math.abs(squareSum - 1.0f) < EPS) { + return; + } + float length = (float) Math.sqrt(squareSum); + for (int i = 0; i < vector.length; i++) { + vector[i] /= length; + } + } }; public final VectorSimilarityFunction function; @@ -692,6 +746,8 @@ public final String toString() { } abstract float score(float similarity, ElementType elementType, int dim); + + void floatPreprocessing(float[] vector, float squareSum) {} }
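The preprocessing above is plain L2 normalization with an epsilon shortcut. Stated on its own (a standalone restatement of the same arithmetic, with EPS inlined):

    static void l2NormalizeInPlace(float[] vector, float squareSum) {
        if (squareSum == 0f) {
            throw new IllegalArgumentException("Cannot normalize a zero-length vector");
        }
        if (Math.abs(squareSum - 1.0f) < 1e-4f) {
            return; // already unit length within EPS, skip the division
        }
        float length = (float) Math.sqrt(squareSum);
        for (int i = 0; i < vector.length; i++) {
            vector[i] /= length;
        }
    }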
private abstract static class IndexOptions implements ToXContent { @@ -763,7 +819,7 @@ public String toString() { public static final class DenseVectorFieldType extends SimpleMappedFieldType { private final ElementType elementType; - private final int dims; + private final Integer dims; private final boolean indexed; private final VectorSimilarity similarity; private final IndexVersion indexVersionCreated; @@ -772,7 +828,7 @@ public DenseVectorFieldType( String name, IndexVersion indexVersionCreated, ElementType elementType, - int dims, + Integer dims, boolean indexed, VectorSimilarity similarity, Map<String, String> meta @@ -850,11 +906,13 @@ public Query createKnnQuery(byte[] queryVector, int numCands, Query filter, Floa } if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) { - float squaredMagnitude = 0.0f; - for (byte b : queryVector) { - squaredMagnitude += b * b; - } - elementType.checkVectorMagnitude(similarity, elementType.errorByteElementsAppender(queryVector), squaredMagnitude); + int squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); + elementType.checkVectorMagnitude( + indexVersionCreated, + similarity, + elementType.errorByteElementsAppender(queryVector), + squaredMagnitude + ); } Query knnQuery = new KnnByteVectorQuery(name(), queryVector, numCands, filter); if (similarityThreshold != null) { @@ -882,11 +940,22 @@ public Query createKnnQuery(float[] queryVector, int numCands, Query filter, Flo elementType.checkVectorBounds(queryVector); if (similarity == VectorSimilarity.DOT_PRODUCT || similarity == VectorSimilarity.COSINE) { - float squaredMagnitude = 0.0f; - for (float e : queryVector) { - squaredMagnitude += e * e; + float squaredMagnitude = VectorUtil.dotProduct(queryVector, queryVector); + elementType.checkVectorMagnitude( + indexVersionCreated, + similarity, + elementType.errorFloatElementsAppender(queryVector), + squaredMagnitude + ); + // We don't want to normalize the original query vector.
+ // It mutates it in place and might cause downstream weirdness + // Instead we copy the value and then normalize that copy + if (similarity == VectorSimilarity.DOT_PRODUCT + && elementType == ElementType.FLOAT + && indexVersionCreated.onOrAfter(DOT_PRODUCT_AUTO_NORMALIZED)) { + queryVector = Arrays.copyOf(queryVector, queryVector.length); + similarity.floatPreprocessing(queryVector, squaredMagnitude); } - elementType.checkVectorMagnitude(similarity, elementType.errorFloatElementsAppender(queryVector), squaredMagnitude); } Query knnQuery = switch (elementType) { case BYTE -> { @@ -922,7 +991,7 @@ ElementType getElementType() { } private final ElementType elementType; - private final int dims; + private final Integer dims; private final boolean indexed; private final VectorSimilarity similarity; private final IndexOptions indexOptions; @@ -932,7 +1001,7 @@ private DenseVectorFieldMapper( String simpleName, MappedFieldType mappedFieldType, ElementType elementType, - int dims, + Integer dims, boolean indexed, VectorSimilarity similarity, IndexOptions indexOptions, @@ -973,6 +1042,33 @@ public void parse(DocumentParserContext context) throws IOException { if (Token.VALUE_NULL == context.parser().currentToken()) { return; } + if (fieldType().dims == null) { + int dims = elementType.parseDimensionCount(context); + DenseVectorFieldType updatedDenseVectorFieldType = new DenseVectorFieldType( + fieldType().name(), + indexCreatedVersion, + elementType, + dims, + indexed, + similarity, + fieldType().meta() + ); + Mapper update = new DenseVectorFieldMapper( + simpleName(), + updatedDenseVectorFieldType, + elementType, + dims, + indexed, + similarity, + indexOptions, + indexCreatedVersion, + multiFields(), + copyTo + ); + context.addDynamicMapper(update); + + return; + }
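With dims now nullable, the first indexed document pins the dimension count through a one-time dynamic mapping update, and later documents must match it. The lifecycle reduces to something like this (a toy illustration, not the mapper code):

    class LateDimsSketch {
        private Integer dims; // null until the first vector arrives

        void onVector(float[] vector) {
            if (dims == null) {
                dims = vector.length; // surfaced once as a dynamic mapping update
            } else if (dims != vector.length) {
                throw new IllegalArgumentException("expected " + dims + " dims but got " + vector.length);
            }
        }
    }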
Field field = fieldType().indexed ? parseKnnVector(context) : parseBinaryDocValuesVector(context); context.doc().addWithKey(fieldType().name(), field); } diff --git a/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java b/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java index ba45a700eebb5..082c2d898e637 100644 --- a/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java +++ b/server/src/main/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapper.java @@ -8,41 +8,48 @@ package org.elasticsearch.index.mapper.vectors; +import org.apache.lucene.document.FeatureField; import org.apache.lucene.search.Query; +import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.logging.DeprecationCategory; -import org.elasticsearch.common.logging.DeprecationLogger; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.analysis.NamedAnalyzer; +import org.elasticsearch.index.fielddata.FieldDataContext; +import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.mapper.DocumentParserContext; import org.elasticsearch.index.mapper.FieldMapper; import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.mapper.MapperBuilderContext; +import org.elasticsearch.index.mapper.SourceValueFetcher; import org.elasticsearch.index.mapper.TextSearchInfo; import org.elasticsearch.index.mapper.ValueFetcher; import org.elasticsearch.index.query.SearchExecutionContext; -import org.elasticsearch.search.DocValueFormat; +import org.elasticsearch.xcontent.XContentParser.Token; -import java.time.ZoneId; +import java.io.IOException; import java.util.Map; +import static org.elasticsearch.index.query.AbstractQueryBuilder.DEFAULT_BOOST; + /** - * A {@link FieldMapper} for indexing a sparse vector of floats. - * - * @deprecated The sparse_vector type was deprecated in 7.x and removed in 8.0. This mapper - * definition only exists so that 7.x indices can be read without error. - * - * TODO: remove in 9.0. + * A {@link FieldMapper} that exposes Lucene's {@link FeatureField} as a sparse + * vector of features. */ -@Deprecated public class SparseVectorFieldMapper extends FieldMapper { - private static final DeprecationLogger deprecationLogger = DeprecationLogger.getLogger(SparseVectorFieldMapper.class); - static final String ERROR_MESSAGE = "The [sparse_vector] field type is no longer supported."; - static final String ERROR_MESSAGE_7X = "The [sparse_vector] field type is no longer supported.
Old 7.x indices are allowed to " - + "contain [sparse_vector] fields, but they cannot be indexed or searched."; + public static final String CONTENT_TYPE = "sparse_vector"; + static final String ERROR_MESSAGE_7X = "[sparse_vector] field type in old 7.x indices is allowed to " + + "contain [sparse_vector] fields, but they cannot be indexed or searched."; + static final String ERROR_MESSAGE_8X = "The [sparse_vector] field type is not supported from 8.0 to 8.10 versions."; + static final IndexVersion PREVIOUS_SPARSE_VECTOR_INDEX_VERSION = IndexVersion.V_8_0_0; + + static final IndexVersion NEW_SPARSE_VECTOR_INDEX_VERSION = IndexVersion.V_8_500_001; + public static class Builder extends FieldMapper.Builder { - final Parameter> meta = Parameter.metaParam(); + private final Parameter> meta = Parameter.metaParam(); public Builder(String name) { super(name); @@ -65,18 +72,19 @@ public SparseVectorFieldMapper build(MapperBuilderContext context) { } public static final TypeParser PARSER = new TypeParser((n, c) -> { - if (c.indexVersionCreated().onOrAfter(IndexVersion.V_8_0_0)) { - throw new IllegalArgumentException(ERROR_MESSAGE); - } else { + if (c.indexVersionCreated().before(PREVIOUS_SPARSE_VECTOR_INDEX_VERSION)) { deprecationLogger.warn(DeprecationCategory.MAPPINGS, "sparse_vector", ERROR_MESSAGE_7X); - return new Builder(n); + } else if (c.indexVersionCreated().before(NEW_SPARSE_VECTOR_INDEX_VERSION)) { + throw new IllegalArgumentException(ERROR_MESSAGE_8X); } - }); + + return new Builder(n); + }, notInMultiFields(CONTENT_TYPE)); public static final class SparseVectorFieldType extends MappedFieldType { public SparseVectorFieldType(String name, Map meta) { - super(name, false, false, false, TextSearchInfo.NONE, meta); + super(name, true, false, false, TextSearchInfo.SIMPLE_MATCH_ONLY, meta); } @Override @@ -85,28 +93,45 @@ public String typeName() { } @Override - public DocValueFormat docValueFormat(String format, ZoneId timeZone) { - throw new UnsupportedOperationException(ERROR_MESSAGE_7X); + public Query existsQuery(SearchExecutionContext context) { + throw new IllegalArgumentException("[sparse_vector] fields do not support [exists] queries"); } @Override - public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { - throw new UnsupportedOperationException(ERROR_MESSAGE_7X); + public IndexFieldData.Builder fielddataBuilder(FieldDataContext fieldDataContext) { + throw new IllegalArgumentException("[sparse_vector] fields do not support sorting, scripting or aggregating"); } @Override - public Query existsQuery(SearchExecutionContext context) { - throw new UnsupportedOperationException(ERROR_MESSAGE_7X); + public ValueFetcher valueFetcher(SearchExecutionContext context, String format) { + return SourceValueFetcher.identity(name(), context, format); } @Override public Query termQuery(Object value, SearchExecutionContext context) { - throw new UnsupportedOperationException(ERROR_MESSAGE_7X); + return FeatureField.newLinearQuery(name(), indexedValueForSearch(value), DEFAULT_BOOST); + } + + private static String indexedValueForSearch(Object value) { + if (value instanceof BytesRef) { + return ((BytesRef) value).utf8ToString(); + } + return value.toString(); } } private SparseVectorFieldMapper(String simpleName, MappedFieldType mappedFieldType, MultiFields multiFields, CopyTo copyTo) { - super(simpleName, mappedFieldType, multiFields, copyTo); + super(simpleName, mappedFieldType, multiFields, copyTo, false, null); + } + + @Override + public Map indexAnalyzers() { + return 
Map.of(mappedFieldType.name(), Lucene.KEYWORD_ANALYZER); + } + + @Override + public FieldMapper.Builder getMergeBuilder() { + return new Builder(simpleName()).init(this); } @Override @@ -115,13 +140,67 @@ public SparseVectorFieldType fieldType() { } @Override - public void parse(DocumentParserContext context) { - throw new UnsupportedOperationException(ERROR_MESSAGE_7X); + protected boolean supportsParsingObject() { + return true; + } + + @Override + public void parse(DocumentParserContext context) throws IOException { + + // No support for indexing / searching 7.x sparse_vector field types + if (context.indexSettings().getIndexVersionCreated().before(PREVIOUS_SPARSE_VECTOR_INDEX_VERSION)) { + throw new UnsupportedOperationException(ERROR_MESSAGE_7X); + } else if (context.indexSettings().getIndexVersionCreated().before(NEW_SPARSE_VECTOR_INDEX_VERSION)) { + throw new UnsupportedOperationException(ERROR_MESSAGE_8X); + } + + if (context.parser().currentToken() != Token.START_OBJECT) { + throw new IllegalArgumentException( + "[sparse_vector] fields must be json objects, expected a START_OBJECT but got: " + context.parser().currentToken() + ); + } + + String feature = null; + try { + // make sure that we don't expand dots in field names while parsing + context.path().setWithinLeafObject(true); + for (Token token = context.parser().nextToken(); token != Token.END_OBJECT; token = context.parser().nextToken()) { + if (token == Token.FIELD_NAME) { + feature = context.parser().currentName(); + if (feature.contains(".")) { + throw new IllegalArgumentException( + "[sparse_vector] fields do not support dots in feature names but found [" + feature + "]" + ); + } + } else if (token == Token.VALUE_NULL) { + // ignore feature, this is consistent with numeric fields + } else if (token == Token.VALUE_NUMBER || token == Token.VALUE_STRING) { + final String key = name() + "." 
+ feature; + float value = context.parser().floatValue(true); + if (context.doc().getByKey(key) != null) { + throw new IllegalArgumentException( + "[sparse_vector] fields do not support indexing multiple values for the same feature [" + + key + + "] in the same document" + ); + } + context.doc().addWithKey(key, new FeatureField(name(), feature, value)); + } else { + throw new IllegalArgumentException( + "[sparse_vector] fields take hashes that map a feature to a strictly positive " + + "float, but got unexpected token " + + token + ); + } + } + } finally { + context.path().setWithinLeafObject(false); + } } @Override protected void parseCreateField(DocumentParserContext context) { - throw new IllegalStateException("parse is implemented directly"); + throw new AssertionError("parse is implemented directly"); } @Override @@ -129,8 +208,4 @@ protected String contentType() { return CONTENT_TYPE; } - @Override - public FieldMapper.Builder getMergeBuilder() { - return new Builder(simpleName()).init(this); - } } diff --git a/server/src/main/java/org/elasticsearch/index/query/AbstractGeometryQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/AbstractGeometryQueryBuilder.java index d0a67dcc34483..ac67c3c95719c 100644 --- a/server/src/main/java/org/elasticsearch/index/query/AbstractGeometryQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/AbstractGeometryQueryBuilder.java @@ -11,7 +11,7 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.client.internal.Client; @@ -158,7 +158,7 @@ protected AbstractGeometryQueryBuilder(StreamInput in) throws IOException { } else { shape = null; indexedShapeId = in.readOptionalString(); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { String type = in.readOptionalString(); assert MapperService.SINGLE_MAPPING_NAME.equals(type) : "Expected type [_doc], got [" + type + "]"; } @@ -183,7 +183,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { GeometryIO.writeGeometry(out, shape); } else { out.writeOptionalString(indexedShapeId); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME); } out.writeOptionalString(indexedShapeIndex); diff --git a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java index 1a9fea929a20c..06ea1d7640f4f 100644 --- a/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/AbstractQueryBuilder.java @@ -267,7 +267,7 @@ protected static void writeQueries(StreamOutput out, List readQueries(StreamInput in) throws IOException { - return in.readNamedWriteableList(QueryBuilder.class); + return in.readNamedWriteableCollectionAsList(QueryBuilder.class); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java index cbec4ea4d324c..4b4727bca4198 100644 --- 
a/server/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/BoolQueryBuilder.java @@ -14,6 +14,7 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -407,6 +408,6 @@ private static boolean rewriteClauses( @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java index fc08aa1f23cce..a857261bb62d9 100644 --- a/server/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/BoostingQueryBuilder.java @@ -11,6 +11,7 @@ import org.apache.lucene.queries.function.FunctionScoreQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -230,6 +231,6 @@ protected void extractInnerHitBuilders(Map inner @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/CombinedFieldsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/CombinedFieldsQueryBuilder.java index 86b4320659a00..ee1eb5ec3d5d3 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CombinedFieldsQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/CombinedFieldsQueryBuilder.java @@ -22,6 +22,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.QueryBuilder; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -459,6 +460,6 @@ protected boolean doEquals(CombinedFieldsQueryBuilder other) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_13_0; + return TransportVersions.V_7_13_0; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java index 2fb9275e125f7..c8249ce673b1e 100644 --- a/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/CommonTermsQueryBuilder.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationLogger; @@ -63,6 +64,6 @@ public static CommonTermsQueryBuilder fromXContent(XContentParser parser) throws @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } 
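Each of these query-builder diffs repeats one move: the method keeps returning a TransportVersion while the constant is read from the new TransportVersions holder. Schematically (ExampleQueryBuilder is hypothetical):

    import org.elasticsearch.TransportVersion;
    import org.elasticsearch.TransportVersions;

    class ExampleQueryBuilder {
        // Oldest wire version this query can be serialized to.
        public TransportVersion getMinimalSupportedVersion() {
            return TransportVersions.ZERO;
        }
    }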
diff --git a/server/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryBuilder.java index 80fd568f7210b..a87e45c797d14 100644 --- a/server/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/ConstantScoreQueryBuilder.java @@ -11,6 +11,7 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -161,6 +162,6 @@ protected void extractInnerHitBuilders(Map inner @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java index 8cfc43f92fb03..92ad781da00aa 100644 --- a/server/src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/DisMaxQueryBuilder.java @@ -11,6 +11,7 @@ import org.apache.lucene.search.DisjunctionMaxQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -224,6 +225,6 @@ protected void extractInnerHitBuilders(Map inner @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/DistanceFeatureQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/DistanceFeatureQueryBuilder.java index 3b67f59e81c84..97f924a0c89ee 100644 --- a/server/src/main/java/org/elasticsearch/index/query/DistanceFeatureQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/DistanceFeatureQueryBuilder.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.GeoUtils; @@ -199,6 +200,6 @@ public String toString() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_2_0; + return TransportVersions.V_7_2_0; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java index ec08a2fde0f8d..8645f6680a314 100644 --- a/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/ExistsQueryBuilder.java @@ -14,6 +14,7 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -177,6 +178,6 @@ public String getWriteableName() { @Override public 
TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java index 05723117ebea1..831bdadebd3e6 100644 --- a/server/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/FieldMaskingSpanQueryBuilder.java @@ -12,6 +12,7 @@ import org.apache.lucene.queries.spans.SpanQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -177,6 +178,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java index 1f0fcbab51fd4..a4b18d6e06d62 100644 --- a/server/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/FuzzyQueryBuilder.java @@ -11,6 +11,7 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -360,6 +361,6 @@ protected boolean doEquals(FuzzyQueryBuilder other) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java index 9afadc1a8d9d8..3305e8ea58c6b 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/GeoBoundingBoxQueryBuilder.java @@ -12,6 +12,7 @@ import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Numbers; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.geo.GeoBoundingBox; @@ -83,7 +84,7 @@ public GeoBoundingBoxQueryBuilder(StreamInput in) throws IOException { super(in); fieldName = in.readString(); geoBoundingBox = new GeoBoundingBox(in); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { in.readVInt(); // ignore value } validationMethod = GeoValidationMethod.readFromStream(in); @@ -94,7 +95,7 @@ public GeoBoundingBoxQueryBuilder(StreamInput in) throws IOException { protected void doWriteTo(StreamOutput out) throws IOException { out.writeString(fieldName); geoBoundingBox.writeTo(out); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeVInt(0); } validationMethod.writeTo(out); @@ -407,6 +408,6 @@ public String getWriteableName() 
{ @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java index 5b35a9af99931..763eb4fd5f3bf 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/GeoDistanceQueryBuilder.java @@ -11,6 +11,7 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoDistance; @@ -402,6 +403,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java index a662d08fdd474..21ff620907b0a 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/GeoPolygonQueryBuilder.java @@ -15,6 +15,7 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.GeoPoint; @@ -329,6 +330,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java index fb342d6b71a98..687df8af1adf1 100644 --- a/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/GeoShapeQueryBuilder.java @@ -11,6 +11,7 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.geo.GeometryParser; import org.elasticsearch.common.geo.ShapeRelation; @@ -271,6 +272,6 @@ public static GeoShapeQueryBuilder fromXContent(XContentParser parser) throws IO @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java index 3d2991eebbea7..a95740aa7f32e 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/IdsQueryBuilder.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.io.stream.StreamInput; @@ -53,7 +54,7 @@ public IdsQueryBuilder() { */ public IdsQueryBuilder(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { // types no longer relevant so ignore String[] types = in.readStringArray(); if (types.length > 0) { @@ -65,11 +66,11 @@ public IdsQueryBuilder(StreamInput in) throws IOException { @Override protected void doWriteTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { // types not supported so send an empty array to previous versions out.writeStringArray(Strings.EMPTY_ARRAY); } - out.writeStringArray(ids.toArray(new String[ids.size()])); + out.writeStringCollection(ids); } /** @@ -156,6 +157,6 @@ protected boolean doEquals(IdsQueryBuilder other) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java index 39b62b54bf087..750553ef0a001 100644 --- a/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/InnerHitBuilder.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.index.query; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -170,7 +170,7 @@ public InnerHitBuilder(StreamInput in) throws IOException { seqNoAndPrimaryTerm = in.readBoolean(); trackScores = in.readBoolean(); storedFieldsContext = in.readOptionalWriteable(StoredFieldsContext::new); - docValueFields = in.readBoolean() ? in.readList(FieldAndFormat::new) : null; + docValueFields = in.readBoolean() ? 
in.readCollectionAsList(FieldAndFormat::new) : null; if (in.readBoolean()) { int size = in.readVInt(); scriptFields = Sets.newHashSetWithExpectedSize(size); @@ -189,9 +189,9 @@ public InnerHitBuilder(StreamInput in) throws IOException { highlightBuilder = in.readOptionalWriteable(HighlightBuilder::new); this.innerCollapseBuilder = in.readOptionalWriteable(CollapseBuilder::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { if (in.readBoolean()) { - fetchFields = in.readList(FieldAndFormat::new); + fetchFields = in.readCollectionAsList(FieldAndFormat::new); } } } @@ -209,7 +209,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(storedFieldsContext); out.writeBoolean(docValueFields != null); if (docValueFields != null) { - out.writeList(docValueFields); + out.writeCollection(docValueFields); } boolean hasScriptFields = scriptFields != null; out.writeBoolean(hasScriptFields); @@ -224,15 +224,15 @@ public void writeTo(StreamOutput out) throws IOException { boolean hasSorts = sorts != null; out.writeBoolean(hasSorts); if (hasSorts) { - out.writeNamedWriteableList(sorts); + out.writeNamedWriteableCollection(sorts); } out.writeOptionalWriteable(highlightBuilder); out.writeOptionalWriteable(innerCollapseBuilder); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { out.writeBoolean(fetchFields != null); if (fetchFields != null) { - out.writeList(fetchFields); + out.writeCollection(fetchFields); } } } diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/IntervalQueryBuilder.java index 5d42a6eb750db..c3ca5d8abccdb 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IntervalQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/IntervalQueryBuilder.java @@ -12,6 +12,7 @@ import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -156,6 +157,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java b/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java index a0ace42c40883..9a326cf927cf6 100644 --- a/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java +++ b/server/src/main/java/org/elasticsearch/index/query/IntervalsSourceProvider.java @@ -13,7 +13,7 @@ import org.apache.lucene.queries.intervals.Intervals; import org.apache.lucene.queries.intervals.IntervalsSource; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; @@ -120,7 +120,7 @@ public Match(StreamInput in) throws IOException { this.ordered = in.readBoolean(); this.analyzer = in.readOptionalString(); this.filter = 
in.readOptionalWriteable(IntervalFilter::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_2_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) { this.useField = in.readOptionalString(); } else { this.useField = null; @@ -204,7 +204,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(ordered); out.writeOptionalString(analyzer); out.writeOptionalWriteable(filter); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_2_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) { out.writeOptionalString(useField); } } @@ -288,7 +288,7 @@ public Disjunction(List subSources, IntervalFilter filt } public Disjunction(StreamInput in) throws IOException { - this.subSources = in.readNamedWriteableList(IntervalsSourceProvider.class); + this.subSources = in.readNamedWriteableCollectionAsList(IntervalsSourceProvider.class); this.filter = in.readOptionalWriteable(IntervalFilter::new); } @@ -332,7 +332,7 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeNamedWriteableList(subSources); + out.writeNamedWriteableCollection(subSources); out.writeOptionalWriteable(filter); } @@ -398,7 +398,7 @@ public Combine(List subSources, boolean ordered, int ma public Combine(StreamInput in) throws IOException { this.ordered = in.readBoolean(); - this.subSources = in.readNamedWriteableList(IntervalsSourceProvider.class); + this.subSources = in.readNamedWriteableCollectionAsList(IntervalsSourceProvider.class); this.maxGaps = in.readInt(); this.filter = in.readOptionalWriteable(IntervalFilter::new); } @@ -447,7 +447,7 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(ordered); - out.writeNamedWriteableList(subSources); + out.writeNamedWriteableCollection(subSources); out.writeInt(maxGaps); out.writeOptionalWriteable(filter); } diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchAllQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchAllQueryBuilder.java index ff50fd1e8e8a6..9b7040e4e7bdc 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchAllQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchAllQueryBuilder.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -83,6 +84,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchBoolPrefixQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchBoolPrefixQueryBuilder.java index 5d6a5892e6caa..0f9105134961a 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchBoolPrefixQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchBoolPrefixQueryBuilder.java @@ -11,6 +11,7 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import 
org.elasticsearch.common.io.stream.StreamInput; @@ -405,6 +406,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_2_0; + return TransportVersions.V_7_2_0; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java index ca9e1c6428864..97bea569a3c95 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchNoneQueryBuilder.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -38,14 +39,14 @@ public MatchNoneQueryBuilder(String rewriteReason) { */ public MatchNoneQueryBuilder(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_029)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_029)) { rewriteReason = in.readOptionalString(); } } @Override protected void doWriteTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_029)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_029)) { out.writeOptionalString(rewriteReason); } } @@ -115,6 +116,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java index d7df607a2c701..b0dd18fdcbee5 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchPhrasePrefixQueryBuilder.java @@ -11,6 +11,7 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -64,7 +65,7 @@ public MatchPhrasePrefixQueryBuilder(StreamInput in) throws IOException { slop = in.readVInt(); maxExpansions = in.readVInt(); analyzer = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { this.zeroTermsQuery = ZeroTermsQueryOption.readFromStream(in); } } @@ -76,7 +77,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(slop); out.writeVInt(maxExpansions); out.writeOptionalString(analyzer); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { zeroTermsQuery.writeTo(out); } } @@ -293,6 +294,6 @@ public static MatchPhrasePrefixQueryBuilder fromXContent(XContentParser parser) @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git 
a/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java index f1dc5d1259556..48448ab806faa 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchPhraseQueryBuilder.java @@ -11,6 +11,7 @@ import org.apache.lucene.analysis.core.KeywordAnalyzer; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -286,6 +287,6 @@ public static MatchPhraseQueryBuilder fromXContent(XContentParser parser) throws @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java index e06051154c547..0d11289a0999a 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MatchQueryBuilder.java @@ -12,6 +12,7 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -117,7 +118,7 @@ public MatchQueryBuilder(StreamInput in) throws IOException { fuzzyRewrite = in.readOptionalString(); fuzziness = in.readOptionalWriteable(Fuzziness::new); // cutoff_frequency has been removed - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { in.readOptionalFloat(); } autoGenerateSynonymsPhraseQuery = in.readBoolean(); @@ -139,7 +140,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeOptionalString(fuzzyRewrite); out.writeOptionalWriteable(fuzziness); // cutoff_frequency has been removed - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeOptionalFloat(null); } out.writeBoolean(autoGenerateSynonymsPhraseQuery); @@ -574,6 +575,6 @@ public static MatchQueryBuilder fromXContent(XContentParser parser) throws IOExc @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java index de168614ccdd7..acf4893de5280 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MoreLikeThisQueryBuilder.java @@ -16,6 +16,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.RoutingMissingException; import org.elasticsearch.action.termvectors.MultiTermVectorsItemResponse; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; @@ 
-206,7 +207,7 @@ public Item(@Nullable String index, XContentBuilder doc) { @SuppressWarnings("unchecked") Item(StreamInput in) throws IOException { index = in.readOptionalString(); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { // types no longer relevant so ignore String type = in.readOptionalString(); if (type != null) { @@ -229,7 +230,7 @@ public Item(@Nullable String index, XContentBuilder doc) { @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(index); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { // types not supported so send an empty array to previous versions out.writeOptionalString(null); } @@ -515,9 +516,9 @@ public MoreLikeThisQueryBuilder(StreamInput in) throws IOException { protected void doWriteTo(StreamOutput out) throws IOException { out.writeOptionalStringArray(fields); out.writeStringArray(likeTexts); - out.writeList(Arrays.asList(likeItems)); + out.writeCollection(Arrays.asList(likeItems)); out.writeStringArray(unlikeTexts); - out.writeList(Arrays.asList(unlikeItems)); + out.writeCollection(Arrays.asList(unlikeItems)); out.writeVInt(maxQueryTerms); out.writeVInt(minTermFreq); out.writeVInt(minDocFreq); @@ -1186,6 +1187,6 @@ protected QueryBuilder doRewrite(QueryRewriteContext queryRewriteContext) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java index dfc6a95bb7ffd..e591f7e07e2ba 100644 --- a/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/MultiMatchQueryBuilder.java @@ -12,6 +12,7 @@ import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -223,7 +224,7 @@ public MultiMatchQueryBuilder(StreamInput in) throws IOException { fuzzyRewrite = in.readOptionalString(); tieBreaker = in.readOptionalFloat(); lenient = in.readOptionalBoolean(); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { in.readOptionalFloat(); } zeroTermsQuery = ZeroTermsQueryOption.readFromStream(in); @@ -234,7 +235,7 @@ public MultiMatchQueryBuilder(StreamInput in) throws IOException { @Override protected void doWriteTo(StreamOutput out) throws IOException { out.writeGenericValue(value); - out.writeMap(fieldsBoosts, StreamOutput::writeString, StreamOutput::writeFloat); + out.writeMap(fieldsBoosts, StreamOutput::writeFloat); type.writeTo(out); operator.writeTo(out); out.writeOptionalString(analyzer); @@ -246,7 +247,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeOptionalString(fuzzyRewrite); out.writeOptionalFloat(tieBreaker); out.writeOptionalBoolean(lenient); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeOptionalFloat(null); } zeroTermsQuery.writeTo(out); @@ -827,6 
+828,6 @@ protected boolean doEquals(MultiMatchQueryBuilder other) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java index d36e2f7d7eef0..83d31c71d4f76 100644 --- a/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/NestedQueryBuilder.java @@ -25,6 +25,7 @@ import org.apache.lucene.search.join.ScoreMode; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.search.MaxScoreCollector; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; @@ -439,6 +440,6 @@ public TopDocsAndMaxScore topDocs(SearchHit hit) throws IOException { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java index d86754028dceb..5042ab358a96c 100644 --- a/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/PrefixQueryBuilder.java @@ -13,6 +13,7 @@ import org.apache.lucene.search.MultiTermQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -72,7 +73,7 @@ public PrefixQueryBuilder(StreamInput in) throws IOException { fieldName = in.readString(); value = in.readString(); rewrite = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { caseInsensitive = in.readBoolean(); } } @@ -82,7 +83,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeString(fieldName); out.writeString(value); out.writeOptionalString(rewrite); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { out.writeBoolean(caseInsensitive); } } @@ -232,6 +233,6 @@ protected boolean doEquals(PrefixQueryBuilder other) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java index bd0af0ac8c129..fa07cceeece30 100644 --- a/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/QueryStringQueryBuilder.java @@ -13,6 +13,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.automaton.Operations; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -192,7 +193,7 @@ 
public QueryStringQueryBuilder(StreamInput in) throws IOException { protected void doWriteTo(StreamOutput out) throws IOException { out.writeString(this.queryString); out.writeOptionalString(this.defaultField); - out.writeMap(this.fieldsAndWeights, StreamOutput::writeString, StreamOutput::writeFloat); + out.writeMap(this.fieldsAndWeights, StreamOutput::writeFloat); this.defaultOperator.writeTo(out); out.writeOptionalString(this.analyzer); out.writeOptionalString(this.quoteAnalyzer); @@ -968,6 +969,6 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java index 6a5e10b0e79d2..4d2a6d3eaecdb 100644 --- a/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/RangeQueryBuilder.java @@ -12,6 +12,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.BytesRef; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.geo.ShapeRelation; @@ -559,6 +560,6 @@ protected boolean doEquals(RangeQueryBuilder other) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java index ad9afc79faf67..b46e30401a0a2 100644 --- a/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/RegexpQueryBuilder.java @@ -15,6 +15,7 @@ import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.automaton.RegExp; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -86,7 +87,7 @@ public RegexpQueryBuilder(StreamInput in) throws IOException { syntaxFlagsValue = in.readVInt(); maxDeterminizedStates = in.readVInt(); rewrite = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { caseInsensitive = in.readBoolean(); } } @@ -98,7 +99,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeVInt(syntaxFlagsValue); out.writeVInt(maxDeterminizedStates); out.writeOptionalString(rewrite); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { out.writeBoolean(caseInsensitive); } } @@ -321,6 +322,6 @@ protected boolean doEquals(RegexpQueryBuilder other) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java index 6200d660d5657..597e23881362b 100644 --- 
a/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/ScriptQueryBuilder.java @@ -21,6 +21,7 @@ import org.apache.lucene.search.Weight; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -225,6 +226,6 @@ protected boolean doEquals(ScriptQueryBuilder other) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java index 603654dcd6340..15ed11654b9e6 100644 --- a/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/SimpleQueryStringBuilder.java @@ -12,6 +12,7 @@ import org.apache.lucene.search.FuzzyQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -90,7 +91,7 @@ public class SimpleQueryStringBuilder extends AbstractQueryBuilder { public static final String NAME = "terms"; - private static final TransportVersion VERSION_STORE_VALUES_AS_BYTES_REFERENCE = TransportVersion.V_7_12_0; + private static final TransportVersion VERSION_STORE_VALUES_AS_BYTES_REFERENCE = TransportVersions.V_7_12_0; private final String fieldName; private final Values values; @@ -666,6 +667,6 @@ public int hashCode() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java b/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java index bf8ef78751b01..ce6ab1db835ab 100644 --- a/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/TermsSetQueryBuilder.java @@ -19,6 +19,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.TermQuery; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -44,7 +45,7 @@ public final class TermsSetQueryBuilder extends AbstractQueryBuilder inner @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/index/query/functionscore/WeightBuilder.java b/server/src/main/java/org/elasticsearch/index/query/functionscore/WeightBuilder.java index 2af55eaaf844b..ee6b720b97442 100644 --- a/server/src/main/java/org/elasticsearch/index/query/functionscore/WeightBuilder.java +++ b/server/src/main/java/org/elasticsearch/index/query/functionscore/WeightBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.index.query.functionscore; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; 
import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.search.function.ScoreFunction; @@ -58,7 +59,7 @@ protected int doHashCode() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } @Override diff --git a/server/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java b/server/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java index 5b627e4a77eb8..ee95fdd925c00 100644 --- a/server/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java +++ b/server/src/main/java/org/elasticsearch/index/refresh/RefreshStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.index.refresh; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -39,7 +39,7 @@ public RefreshStats() {} public RefreshStats(StreamInput in) throws IOException { total = in.readVLong(); totalTimeInMillis = in.readVLong(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_2_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) { externalTotal = in.readVLong(); externalTotalTimeInMillis = in.readVLong(); } @@ -50,7 +50,7 @@ public RefreshStats(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeVLong(total); out.writeVLong(totalTimeInMillis); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_2_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) { out.writeVLong(externalTotal); out.writeVLong(externalTotalTimeInMillis); } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java index eca77904e998f..fedcbc1a076d0 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollResponse.java @@ -68,8 +68,8 @@ public BulkByScrollResponse(StreamInput in) throws IOException { super(in); took = in.readTimeValue(); status = new BulkByScrollTask.Status(in); - bulkFailures = in.readList(Failure::new); - searchFailures = in.readList(ScrollableHitSource.SearchFailure::new); + bulkFailures = in.readCollectionAsList(Failure::new); + searchFailures = in.readCollectionAsList(ScrollableHitSource.SearchFailure::new); timedOut = in.readBoolean(); } @@ -185,8 +185,8 @@ public boolean isTimedOut() { public void writeTo(StreamOutput out) throws IOException { out.writeTimeValue(took); status.writeTo(out); - out.writeList(bulkFailures); - out.writeList(searchFailures); + out.writeCollection(bulkFailures); + out.writeCollection(searchFailures); out.writeBoolean(timedOut); } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java index c438834ff3d68..150948b4e5822 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/BulkByScrollTask.java @@ -537,7 +537,7 @@ public Status(StreamInput in) throws IOException { requestsPerSecond = in.readFloat(); reasonCancelled = in.readOptionalString(); throttledUntil = in.readTimeValue(); - 
sliceStatuses = in.readList(stream -> stream.readOptionalWriteable(StatusOrException::new)); + sliceStatuses = in.readCollectionAsList(stream -> stream.readOptionalWriteable(StatusOrException::new)); } @Override diff --git a/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java b/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java index f168ef9ec2dcf..68eec49f082ce 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/RemoteInfo.java @@ -8,7 +8,7 @@ package org.elasticsearch.index.reindex; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -99,7 +99,7 @@ public RemoteInfo(StreamInput in) throws IOException { port = in.readVInt(); query = in.readBytesReference(); username = in.readOptionalString(); - if (in.getTransportVersion().before(TransportVersion.V_8_2_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_2_0)) { password = new SecureString(in.readOptionalString().toCharArray()); } else { password = in.readOptionalSecureString(); @@ -122,7 +122,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(port); out.writeBytesReference(query); out.writeOptionalString(username); - if (out.getTransportVersion().before(TransportVersion.V_8_2_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_2_0)) { out.writeOptionalString(password.toString()); } else { out.writeOptionalSecureString(password); diff --git a/server/src/main/java/org/elasticsearch/index/reindex/RetryListener.java b/server/src/main/java/org/elasticsearch/index/reindex/RetryListener.java index 45def8bca9aed..b56bed989b6dd 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/RetryListener.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/RetryListener.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DelegatingActionListener; import org.elasticsearch.action.bulk.BackoffPolicy; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.threadpool.ThreadPool; @@ -60,6 +61,6 @@ public void onRejection(Exception e) { } private void schedule(Runnable runnable, TimeValue delay) { - threadPool.schedule(runnable, delay, ThreadPool.Names.SAME); + threadPool.schedule(runnable, delay, EsExecutors.DIRECT_EXECUTOR_SERVICE); } } diff --git a/server/src/main/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskState.java b/server/src/main/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskState.java index 43bd7ba6f7e5e..e167994e0929f 100644 --- a/server/src/main/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskState.java +++ b/server/src/main/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskState.java @@ -252,7 +252,7 @@ class DelayedPrepareBulkRequest { this.scheduled = threadPool.schedule(() -> { throttledNanos.addAndGet(delay.nanos()); command.run(); - }, delay, ThreadPool.Names.GENERIC); + }, delay, threadPool.generic()); } DelayedPrepareBulkRequest rethrottle(float newRequestsPerSecond) { diff --git a/server/src/main/java/org/elasticsearch/index/search/stats/FieldUsageStats.java b/server/src/main/java/org/elasticsearch/index/search/stats/FieldUsageStats.java index 1a0be7523ba08..04f3e49fec820 
100644 --- a/server/src/main/java/org/elasticsearch/index/search/stats/FieldUsageStats.java +++ b/server/src/main/java/org/elasticsearch/index/search/stats/FieldUsageStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.index.search.stats; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -57,7 +57,7 @@ public FieldUsageStats(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(stats, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + out.writeMap(stats, StreamOutput::writeWriteable); } @Override @@ -210,7 +210,7 @@ public PerFieldUsageStats(StreamInput in) throws IOException { payloads = in.readVLong(); termVectors = in.readVLong(); points = in.readVLong(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { knnVectors = in.readVLong(); } else { knnVectors = 0; @@ -232,7 +232,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(payloads); out.writeVLong(termVectors); out.writeVLong(points); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { out.writeVLong(knnVectors); } } diff --git a/server/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java b/server/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java index 22fdf8ecd2bc3..e1953e1ff2b40 100644 --- a/server/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java +++ b/server/src/main/java/org/elasticsearch/index/search/stats/SearchStats.java @@ -398,7 +398,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } else { out.writeBoolean(true); - out.writeMap(groupStats, StreamOutput::writeString, (stream, stats) -> stats.writeTo(stream)); + out.writeMap(groupStats, StreamOutput::writeWriteable); } } diff --git a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java index c98b111ad573d..29537b4ddbdb9 100644 --- a/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java +++ b/server/src/main/java/org/elasticsearch/index/seqno/ReplicationTracker.java @@ -1602,7 +1602,7 @@ public IndexShardRoutingTable getRoutingTable() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeVLong(clusterStateVersion); - out.writeMap(checkpoints, (streamOutput, s) -> out.writeString(s), (streamOutput, cps) -> cps.writeTo(out)); + out.writeMap(checkpoints, StreamOutput::writeWriteable); IndexShardRoutingTable.Builder.writeTo(routingTable, out); } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexingStats.java b/server/src/main/java/org/elasticsearch/index/shard/IndexingStats.java index 22f3da92eebaf..1823c4bdfbfa1 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexingStats.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexingStats.java @@ -9,6 +9,7 @@ package org.elasticsearch.index.shard; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import 
org.elasticsearch.common.io.stream.Writeable; @@ -27,7 +28,7 @@ public class IndexingStats implements Writeable, ToXContentFragment { public static class Stats implements Writeable, ToXContentFragment { - private static final TransportVersion WRITE_LOAD_AVG_SUPPORTED_VERSION = TransportVersion.V_8_6_0; + private static final TransportVersion WRITE_LOAD_AVG_SUPPORTED_VERSION = TransportVersions.V_8_6_0; private long indexCount; private long indexTimeInMillis; @@ -268,7 +269,7 @@ public IndexingStats() { public IndexingStats(StreamInput in) throws IOException { totalStats = new Stats(in); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { if (in.readBoolean()) { Map typeStats = in.readMap(Stats::new); assert typeStats.size() == 1; @@ -349,7 +350,7 @@ static final class Fields { @Override public void writeTo(StreamOutput out) throws IOException { totalStats.writeTo(out); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeBoolean(false); } } diff --git a/server/src/main/java/org/elasticsearch/index/stats/IndexingPressureStats.java b/server/src/main/java/org/elasticsearch/index/stats/IndexingPressureStats.java index e4db6921a44d2..81eb40e6f6f61 100644 --- a/server/src/main/java/org/elasticsearch/index/stats/IndexingPressureStats.java +++ b/server/src/main/java/org/elasticsearch/index/stats/IndexingPressureStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.index.stats; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -57,7 +57,7 @@ public IndexingPressureStats(StreamInput in) throws IOException { primaryRejections = in.readVLong(); replicaRejections = in.readVLong(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { memoryLimit = in.readVLong(); } else { memoryLimit = -1L; @@ -129,7 +129,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(primaryRejections); out.writeVLong(replicaRejections); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { out.writeVLong(memoryLimit); } } diff --git a/server/src/main/java/org/elasticsearch/index/store/Store.java b/server/src/main/java/org/elasticsearch/index/store/Store.java index 6daef1f1ab329..015fbf06e042c 100644 --- a/server/src/main/java/org/elasticsearch/index/store/Store.java +++ b/server/src/main/java/org/elasticsearch/index/store/Store.java @@ -885,7 +885,7 @@ public static MetadataSnapshot readFrom(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeMapValues(fileMetadataMap); - out.writeMap(commitUserData, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(commitUserData, StreamOutput::writeString); out.writeLong(numDocs); } diff --git a/server/src/main/java/org/elasticsearch/index/store/StoreFileMetadata.java b/server/src/main/java/org/elasticsearch/index/store/StoreFileMetadata.java index d9422c648ef8a..9a2655cd6e56f 100644 --- a/server/src/main/java/org/elasticsearch/index/store/StoreFileMetadata.java +++ 
b/server/src/main/java/org/elasticsearch/index/store/StoreFileMetadata.java @@ -16,6 +16,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.Version; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -29,7 +30,7 @@ public class StoreFileMetadata implements Writeable { public static final BytesRef UNAVAILABLE_WRITER_UUID = new BytesRef(); - private static final TransportVersion WRITER_UUID_MIN_VERSION = TransportVersion.V_7_16_0; + private static final TransportVersion WRITER_UUID_MIN_VERSION = TransportVersions.V_7_16_0; private final String name; diff --git a/server/src/main/java/org/elasticsearch/index/store/StoreStats.java b/server/src/main/java/org/elasticsearch/index/store/StoreStats.java index 0820f859355ae..cda87a421bd32 100644 --- a/server/src/main/java/org/elasticsearch/index/store/StoreStats.java +++ b/server/src/main/java/org/elasticsearch/index/store/StoreStats.java @@ -9,6 +9,7 @@ package org.elasticsearch.index.store; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -27,8 +28,8 @@ public class StoreStats implements Writeable, ToXContentFragment { */ public static final long UNKNOWN_RESERVED_BYTES = -1L; - public static final TransportVersion RESERVED_BYTES_VERSION = TransportVersion.V_7_9_0; - public static final TransportVersion TOTAL_DATA_SET_SIZE_SIZE_VERSION = TransportVersion.V_7_13_0; + public static final TransportVersion RESERVED_BYTES_VERSION = TransportVersions.V_7_9_0; + public static final TransportVersion TOTAL_DATA_SET_SIZE_SIZE_VERSION = TransportVersions.V_7_13_0; private long sizeInBytes; private long totalDataSetSizeInBytes; diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index 871de3c9f9f87..ed273a8701c41 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -9,7 +9,7 @@ package org.elasticsearch.index.translog; import org.apache.lucene.store.AlreadyClosedException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; @@ -1223,7 +1223,7 @@ public long version() { @Override public void writeBody(final StreamOutput out) throws IOException { - final int format = out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0) + final int format = out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0) ? SERIALIZATION_FORMAT : FORMAT_NO_VERSION_TYPE; out.writeVInt(format); @@ -1364,7 +1364,7 @@ public long version() { @Override public void writeBody(final StreamOutput out) throws IOException { - final int format = out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0) + final int format = out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0) ? 
SERIALIZATION_FORMAT : FORMAT_NO_VERSION_TYPE; out.writeVInt(format); @@ -1504,7 +1504,7 @@ public static List readOperations(StreamInput input, String source) t ArrayList operations = new ArrayList<>(); int numOps = input.readInt(); final BufferedChecksumStreamInput checksumStreamInput = new BufferedChecksumStreamInput(input, source); - if (input.getTransportVersion().before(TransportVersion.V_8_8_0)) { + if (input.getTransportVersion().before(TransportVersions.V_8_8_0)) { for (int i = 0; i < numOps; i++) { operations.add(readOperation(checksumStreamInput)); } @@ -1554,7 +1554,7 @@ public static void writeOperations(StreamOutput outStream, List toWri if (size == 0) { return; } - if (outStream.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (outStream.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { final BufferedChecksumStreamOutput checksumStreamOutput = new BufferedChecksumStreamOutput(outStream); for (Operation op : toWrite) { writeOperationNoSize(checksumStreamOutput, op); diff --git a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java index 2a705dfb8b38e..c0271ad30d720 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java +++ b/server/src/main/java/org/elasticsearch/indices/IndexingMemoryController.java @@ -16,7 +16,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.shard.IndexShard; @@ -25,15 +25,18 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.threadpool.Scheduler.Cancellable; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.threadpool.ThreadPool.Names; import java.io.Closeable; import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.Deque; import java.util.EnumSet; import java.util.HashSet; import java.util.List; -import java.util.PriorityQueue; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentLinkedDeque; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantLock; @@ -105,6 +108,9 @@ public class IndexingMemoryController implements IndexingOperationListener, Clos private final ShardsIndicesStatusChecker statusChecker; + private final Set pendingWriteIndexingBufferSet = Collections.newSetFromMap(new ConcurrentHashMap<>()); + private final Deque pendingWriteIndexingBufferQueue = new ConcurrentLinkedDeque<>(); + IndexingMemoryController(Settings settings, ThreadPool threadPool, Iterable indexServices) { this.indexShards = indexServices; @@ -147,7 +153,7 @@ public class IndexingMemoryController implements IndexingOperationListener, Clos protected Cancellable scheduleTask(ThreadPool threadPool) { // it's fine to run it on the scheduler thread, no busy work - return threadPool.scheduleWithFixedDelay(statusChecker, interval, Names.SAME); + return threadPool.scheduleWithFixedDelay(statusChecker, interval, EsExecutors.DIRECT_EXECUTOR_SERVICE); } @Override @@ -183,19 +189,41 @@ protected long getShardWritingBytes(IndexShard shard) { return shard.getWritingBytes(); } - /** ask 
this shard to refresh, in the background, to free up heap */ - protected void writeIndexingBufferAsync(IndexShard shard) { - threadPool.executor(ThreadPool.Names.REFRESH).execute(new AbstractRunnable() { - @Override - public void doRun() { - shard.writeIndexingBuffer(); - } + /** Record that the given shard needs to write its indexing buffer. */ + protected void enqueueWriteIndexingBuffer(IndexShard shard) { + if (pendingWriteIndexingBufferSet.add(shard)) { + pendingWriteIndexingBufferQueue.addLast(shard); + } + // Else there is already a queued task for the same shard and there is no evidence that adding another one is required since we'd + // need the first one to start running to know about the number of bytes still not being written. + } - @Override - public void onFailure(Exception e) { - logger.warn(() -> "failed to write indexing buffer for shard [" + shard.shardId() + "]; ignoring", e); - } - }); + /** + * Write pending indexing buffers. This should run on indexing threads in order to naturally apply back pressure on indexing. Lucene has + * similar logic in DocumentsWriter#postUpdate. + */ + private boolean writePendingIndexingBuffers() { + boolean wrotePendingIndexingBuffer = false; + for (IndexShard shard = pendingWriteIndexingBufferQueue.pollFirst(); shard != null; shard = pendingWriteIndexingBufferQueue + .pollFirst()) { + // Remove the shard from the set first, so that multiple threads can run writeIndexingBuffer concurrently on the same shard. + pendingWriteIndexingBufferSet.remove(shard); + shard.writeIndexingBuffer(); + wrotePendingIndexingBuffer = true; + } + return wrotePendingIndexingBuffer; + } + + private void writePendingIndexingBuffersAsync() { + for (IndexShard shard = pendingWriteIndexingBufferQueue.pollFirst(); shard != null; shard = pendingWriteIndexingBufferQueue + .pollFirst()) { + final IndexShard finalShard = shard; + threadPool.executor(ThreadPool.Names.REFRESH).execute(() -> { + // Remove the shard from the set first, so that multiple threads can run writeIndexingBuffer concurrently on the same shard. + pendingWriteIndexingBufferSet.remove(finalShard); + finalShard.writeIndexingBuffer(); + }); + } } /** force checker to run now */ @@ -215,12 +243,26 @@ protected void deactivateThrottling(IndexShard shard) { @Override public void postIndex(ShardId shardId, Engine.Index index, Engine.IndexResult result) { - recordOperationBytes(index, result); + postOperation(shardId, index, result); } @Override public void postDelete(ShardId shardId, Engine.Delete delete, Engine.DeleteResult result) { - recordOperationBytes(delete, result); + postOperation(shardId, delete, result); + } + + private void postOperation(ShardId shardId, Engine.Operation operation, Engine.Result result) { + recordOperationBytes(operation, result); + // Piggy back on indexing threads to write segments. We're not submitting a task to the index threadpool because we want memory to + // be reclaimed rapidly. This has the downside of increasing the latency of _bulk requests though. Lucene does the same thing in + // DocumentsWriter#postUpdate, flushing a segment because the size limit on the RAM buffer was reached happens on the call to + // IndexWriter#addDocument. + while (writePendingIndexingBuffers()) { + // If we just wrote segments, then run the checker again if not already running to check if we released enough memory. 
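The enqueue/drain pair above is an at-most-once work queue: a concurrent set guards the deque so a shard is queued at most once, and consumers remove the shard from the set before writing its buffer so that a concurrent indexing thread can re-enqueue it while the write is in flight. A minimal generic sketch of the same pattern, with illustrative names rather than the ES API:

    import java.util.Collections;
    import java.util.Deque;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentLinkedDeque;
    import java.util.function.Consumer;

    // Sketch: a concurrent set deduplicates entries in a concurrent deque.
    final class PendingQueueSketch<T> {
        private final Set<T> pending = Collections.newSetFromMap(new ConcurrentHashMap<>());
        private final Deque<T> queue = new ConcurrentLinkedDeque<>();

        void enqueue(T item) {
            if (pending.add(item)) { // first add wins; duplicates are dropped
                queue.addLast(item);
            }
        }

        boolean drain(Consumer<T> work) {
            boolean didWork = false;
            for (T item = queue.pollFirst(); item != null; item = queue.pollFirst()) {
                pending.remove(item); // allow re-enqueue while work runs
                work.accept(item);
                didWork = true;
            }
            return didWork;
        }
    }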
+ if (statusChecker.tryRun() == false) { + break; + } + } } /** called by IndexShard to record estimated bytes written to translog for the operation */ @@ -230,7 +272,7 @@ private void recordOperationBytes(Engine.Operation operation, Engine.Result resu } } - private static final class ShardAndBytesUsed implements Comparable<ShardAndBytesUsed> { + private static final class ShardAndBytesUsed { final long bytesUsed; final IndexShard shard; @@ -239,11 +281,6 @@ private static final class ShardAndBytesUsed implements Comparable<ShardAndBytesUsed> this.bytesUsed = bytesUsed; this.shard = shard; } - - @Override - public int compareTo(ShardAndBytesUsed other) { - // Sort larger shards first: - return Long.compare(other.bytesUsed, bytesUsed); - } } @@ ... @@ public void bytesWritten(int bytes) { long totalBytes = bytesWrittenSinceCheck.addAndGet(bytes); assert totalBytes >= 0; - while (totalBytes > indexingBuffer.getBytes() / 30) { + while (totalBytes > indexingBuffer.getBytes() / 128) { if (runLock.tryLock()) { try { // Must pull this again because it may have changed since we first checked: totalBytes = bytesWrittenSinceCheck.get(); - if (totalBytes > indexingBuffer.getBytes() / 30) { + if (totalBytes > indexingBuffer.getBytes() / 128) { bytesWrittenSinceCheck.addAndGet(-totalBytes); // NOTE: this is only an approximate check, because bytes written is to the translog, // vs indexing memory buffer which is typically smaller but can be larger in extreme @@ -284,8 +323,24 @@ public void bytesWritten(int bytes) { } } + public boolean tryRun() { + if (runLock.tryLock()) { + try { + runUnlocked(); + } finally { + runLock.unlock(); + } + return true; + } else { + return false; + } + } + @Override public void run() { + // If there are any remainders from the previous check, schedule them now. Most of the time, indexing threads would have taken + // care of these indexing buffers before, and we wouldn't need to do it here. + writePendingIndexingBuffersAsync(); runLock.lock(); try { runUnlocked(); @@ -295,6 +350,7 @@ } private void runUnlocked() { + assert runLock.isHeldByCurrentThread() : "ShardsIndicesStatusChecker#runUnlocked must always run under the run lock"; // NOTE: even if we hit an errant exc here, our ThreadPool.scheduledWithFixedDelay will log the exception and re-invoke us // again, on schedule @@ -341,7 +397,7 @@ private void runUnlocked() { if (totalBytesUsed > indexingBuffer.getBytes()) { // OK we are now over-budget; fill the priority queue and ask largest shard(s) to refresh: - PriorityQueue<ShardAndBytesUsed> queue = new PriorityQueue<>(); + List<ShardAndBytesUsed> queue = new ArrayList<>(); for (IndexShard shard : availableShards()) { // How many bytes this shard is currently (async'd) moving from heap to disk: @@ -386,21 +442,56 @@ queue.size() ); - while (totalBytesUsed > indexingBuffer.getBytes() && queue.isEmpty() == false) { - ShardAndBytesUsed largest = queue.poll(); + // What is the best order to go over shards and reclaim memory usage? Interestingly, picking random shards performs _much_ + // better than picking the largest shard when trying to optimize for the elastic/logs Rally track. One explanation for this + // is that Lucene's IndexWriter creates new pending segments in memory in order to satisfy indexing concurrency. E.g. if N + // indexing threads suddenly index into the same IndexWriter, then the IndexWriter will have N pending segments in memory. + // However, it's likely that indexing concurrency is not constant on a per-shard basis, especially when indexing into many + // shards concurrently.
So there are chances that if we flush a single segment now, then it won't be re-created shortly + // because the peak indexing concurrency is rarely observed, and we end up indexing into fewer pending segments globally on + // average, which in-turn reduces the total number of segments that get produced, and also reduces merging. + // The downside of picking the shard that has the biggest indexing buffer is that it is often also the shard that has the + // highest ingestion rate, and thus it is also the shard that is the most likely to re-create a new pending segment in the + // very near future after one segment has been flushed. + + // We want to go over shards in a round-robin fashion across calls to #runUnlocked. First sort shards by something stable + // like the shard ID. + queue.sort(Comparator.comparing(shardAndBytes -> shardAndBytes.shard.shardId())); + if (lastShardId != null) { + // Then rotate the list so that the first shard that is greater than the ID of the last shard whose indexing buffer was + // written comes first. + int nextShardIdIndex = 0; + for (ShardAndBytesUsed shardAndBytes : queue) { + if (shardAndBytes.shard.shardId().compareTo(lastShardId) > 0) { + break; + } + nextShardIdIndex++; + } + Collections.rotate(queue, -nextShardIdIndex); + } + + for (ShardAndBytesUsed shardAndBytesUsed : queue) { logger.debug( "write indexing buffer to disk for shard [{}] to free up its [{}] indexing buffer", - largest.shard.shardId(), - ByteSizeValue.ofBytes(largest.bytesUsed) + shardAndBytesUsed.shard.shardId(), + ByteSizeValue.ofBytes(shardAndBytesUsed.bytesUsed) ); - writeIndexingBufferAsync(largest.shard); - totalBytesUsed -= largest.bytesUsed; - if (doThrottle && throttled.contains(largest.shard) == false) { - logger.info("now throttling indexing for shard [{}]: segment writing can't keep up", largest.shard.shardId()); - throttled.add(largest.shard); - activateThrottling(largest.shard); + enqueueWriteIndexingBuffer(shardAndBytesUsed.shard); + totalBytesUsed -= shardAndBytesUsed.bytesUsed; + lastShardId = shardAndBytesUsed.shard.shardId(); + if (doThrottle && throttled.contains(shardAndBytesUsed.shard) == false) { + logger.debug( + "now throttling indexing for shard [{}]: segment writing can't keep up", + shardAndBytesUsed.shard.shardId() + ); + throttled.add(shardAndBytesUsed.shard); + activateThrottling(shardAndBytesUsed.shard); + } + if (totalBytesUsed <= indexingBuffer.getBytes()) { + break; } } + } if (doThrottle == false) { diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java index 112d83c34d4af..7394e5eb89458 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesQueryCache.java @@ -25,13 +25,13 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.Nullable; import org.elasticsearch.index.cache.query.QueryCacheStats; import org.elasticsearch.index.shard.ShardId; import java.io.Closeable; import java.io.IOException; import java.util.Collections; -import java.util.HashMap; import java.util.IdentityHashMap; import java.util.Map; import java.util.Set; @@ -85,34 +85,58 @@ public IndicesQueryCache(Settings settings) { sharedRamBytesUsed = 0; } - /** Get usage statistics for the given shard. 
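The round-robin selection in the hunk above — sort by a stable key, then rotate past the last shard handled — can be seen in isolation in the following sketch. Plain Strings stand in for ShardId objects; the names are illustrative:

    import java.util.ArrayList;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.List;

    // Sketch: resume iteration just after the element processed last time.
    final class RoundRobinSketch {
        public static void main(String[] args) {
            List<String> shards = new ArrayList<>(List.of("s3", "s1", "s4", "s2"));
            String lastProcessed = "s2"; // remembered from the previous pass

            shards.sort(Comparator.naturalOrder());
            int next = 0;
            for (String shard : shards) {
                if (shard.compareTo(lastProcessed) > 0) {
                    break; // first shard strictly after the marker
                }
                next++;
            }
            Collections.rotate(shards, -next);
            System.out.println(shards); // [s3, s4, s1, s2]
        }
    }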
*/ - public QueryCacheStats getStats(ShardId shard) { - final Map stats = new HashMap<>(); - for (Map.Entry entry : shardStats.entrySet()) { - stats.put(entry.getKey(), entry.getValue().toQueryCacheStats()); - } - QueryCacheStats shardStats = new QueryCacheStats(); - QueryCacheStats info = stats.get(shard); - if (info == null) { - info = new QueryCacheStats(); - } - shardStats.add(info); - - // We also have some shared ram usage that we try to distribute - // proportionally to their number of cache entries of each shard. - // Sometimes it's not possible to do this when there are no shard entries at all, - // which can happen as the shared ram usage can extend beyond the closing of all shards. - if (stats.isEmpty() == false) { - long totalSize = 0; - for (QueryCacheStats s : stats.values()) { - totalSize += s.getCacheSize(); + private static QueryCacheStats toQueryCacheStatsSafe(@Nullable Stats stats) { + return stats == null ? new QueryCacheStats() : stats.toQueryCacheStats(); + } + + private long getShareOfAdditionalRamBytesUsed(long cacheSize) { + if (sharedRamBytesUsed == 0L) { + return 0L; + } + + // We also have some shared ram usage that we try to distribute proportionally to the cache footprint of each shard. + // TODO avoid looping over all local shards here - see https://github.com/elastic/elasticsearch/issues/97222 + long totalSize = 0L; + int shardCount = 0; + if (cacheSize == 0L) { + for (final var stats : shardStats.values()) { + shardCount += 1; + if (stats.cacheSize > 0L) { + // some shard has nonzero cache footprint, so we apportion the shared size by cache footprint, and this shard has none + return 0L; + } + } + } else { + // branchless loop for the common case + for (final var stats : shardStats.values()) { + shardCount += 1; + totalSize += stats.cacheSize; } - final double weight = totalSize == 0 ? 1d / stats.size() : ((double) shardStats.getCacheSize()) / totalSize; - final long additionalRamBytesUsed = Math.round(weight * sharedRamBytesUsed); - assert additionalRamBytesUsed >= 0L : additionalRamBytesUsed; - shardStats.add(new QueryCacheStats(additionalRamBytesUsed, 0, 0, 0, 0)); } - return shardStats; + + if (shardCount == 0) { + // Sometimes it's not possible to do this when there are no shard entries at all, which can happen as the shared ram usage can + // extend beyond the closing of all shards. + return 0L; + } + + final long additionalRamBytesUsed; + if (totalSize == 0) { + // all shards have zero cache footprint, so we apportion the size of the shared bytes equally across all shards + additionalRamBytesUsed = Math.round((double) sharedRamBytesUsed / shardCount); + } else { + // some shards have nonzero cache footprint, so we apportion the size of the shared bytes proportionally to cache footprint + additionalRamBytesUsed = Math.round((double) sharedRamBytesUsed * cacheSize / totalSize); + } + assert additionalRamBytesUsed >= 0L : additionalRamBytesUsed; + return additionalRamBytesUsed; + } + + /** Get usage statistics for the given shard. 
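The getShareOfAdditionalRamBytesUsed helper above apportions the shared RAM proportionally to each shard's cache footprint, falling back to an equal split when every footprint is zero. A self-contained sketch of the same arithmetic, with plain longs standing in for the per-shard Stats objects:

    // Sketch of the apportioning arithmetic; not the real IndicesQueryCache API.
    final class ApportionSketch {
        static long shareOf(long sharedBytes, long cacheSize, long[] allCacheSizes) {
            if (sharedBytes == 0L || allCacheSizes.length == 0) {
                return 0L; // nothing to share, or no shards left to charge
            }
            long totalSize = 0L;
            for (long s : allCacheSizes) {
                totalSize += s;
            }
            if (totalSize == 0L) {
                // all footprints are zero: split the shared bytes equally
                return Math.round((double) sharedBytes / allCacheSizes.length);
            }
            // otherwise: proportional to this shard's footprint
            return Math.round((double) sharedBytes * cacheSize / totalSize);
        }

        public static void main(String[] args) {
            long[] sizes = { 100, 300, 0 };
            System.out.println(shareOf(4000, 300, sizes)); // 3000
        }
    }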
*/ + public QueryCacheStats getStats(ShardId shard) { + final QueryCacheStats queryCacheStats = toQueryCacheStatsSafe(shardStats.get(shard)); + queryCacheStats.addRamBytesUsed(getShareOfAdditionalRamBytesUsed(queryCacheStats.getCacheSize())); + return queryCacheStats; } @Override diff --git a/server/src/main/java/org/elasticsearch/indices/IndicesService.java b/server/src/main/java/org/elasticsearch/indices/IndicesService.java index f518cfc018be7..b34086ddb5b77 100644 --- a/server/src/main/java/org/elasticsearch/indices/IndicesService.java +++ b/server/src/main/java/org/elasticsearch/indices/IndicesService.java @@ -261,7 +261,7 @@ public class IndicesService extends AbstractLifecycleComponent @Override protected void doStart() { // Start thread that will manage cleaning the field data cache periodically - threadPool.schedule(this.cacheCleaner, this.cleanInterval, ThreadPool.Names.SAME); + threadPool.schedule(this.cacheCleaner, this.cleanInterval, EsExecutors.DIRECT_EXECUTOR_SERVICE); // Start watching for timestamp fields clusterService.addStateApplier(timestampFieldMapperService); @@ -1484,7 +1484,7 @@ public void run() { } // Reschedule itself to run again if not closed if (closed.get() == false) { - threadPool.scheduleUnlessShuttingDown(interval, ThreadPool.Names.SAME, this); + threadPool.scheduleUnlessShuttingDown(interval, EsExecutors.DIRECT_EXECUTOR_SERVICE, this); } } diff --git a/server/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java b/server/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java index 033624ec79e40..98f535ec7cefb 100644 --- a/server/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java +++ b/server/src/main/java/org/elasticsearch/indices/NodeIndicesStats.java @@ -9,6 +9,7 @@ package org.elasticsearch.indices; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.NodeStatsLevel; import org.elasticsearch.action.admin.indices.stats.CommonStats; import org.elasticsearch.action.admin.indices.stats.IndexShardStats; @@ -57,7 +58,7 @@ */ public class NodeIndicesStats implements Writeable, ChunkedToXContent { - private static final TransportVersion VERSION_SUPPORTING_STATS_BY_INDEX = TransportVersion.V_8_5_0; + private static final TransportVersion VERSION_SUPPORTING_STATS_BY_INDEX = TransportVersions.V_8_5_0; private final CommonStats stats; private final Map> statsByShard; @@ -206,9 +207,9 @@ public DenseVectorStats getDenseVectorStats() { @Override public void writeTo(StreamOutput out) throws IOException { stats.writeTo(out); - out.writeMap(statsByShard, (o, k) -> k.writeTo(o), StreamOutput::writeList); + out.writeMap(statsByShard, StreamOutput::writeWriteable, StreamOutput::writeCollection); if (out.getTransportVersion().onOrAfter(VERSION_SUPPORTING_STATS_BY_INDEX)) { - out.writeMap(statsByIndex, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o)); + out.writeMap(statsByIndex); } } diff --git a/server/src/main/java/org/elasticsearch/indices/SystemDataStreamDescriptor.java b/server/src/main/java/org/elasticsearch/indices/SystemDataStreamDescriptor.java index 5f5e1994995ea..616f4f57abf06 100644 --- a/server/src/main/java/org/elasticsearch/indices/SystemDataStreamDescriptor.java +++ b/server/src/main/java/org/elasticsearch/indices/SystemDataStreamDescriptor.java @@ -12,7 +12,9 @@ import org.elasticsearch.cluster.metadata.ComponentTemplate; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.DataStream; +import 
org.elasticsearch.cluster.metadata.DataStreamLifecycle; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import java.util.List; import java.util.Map; @@ -87,6 +89,7 @@ public SystemDataStreamDescriptor( this.type = Objects.requireNonNull(type, "type must be specified"); this.composableIndexTemplate = Objects.requireNonNull(composableIndexTemplate, "composableIndexTemplate must be provided"); this.componentTemplates = componentTemplates == null ? Map.of() : Map.copyOf(componentTemplates); + validateNoDownsamplingConfigured(composableIndexTemplate, componentTemplates); this.allowedElasticProductOrigins = Objects.requireNonNull( allowedElasticProductOrigins, "allowedElasticProductOrigins must not be null" @@ -99,6 +102,16 @@ public SystemDataStreamDescriptor( this.characterRunAutomaton = new CharacterRunAutomaton(buildAutomaton(backingIndexPatternForDataStream(this.dataStreamName))); } + private void validateNoDownsamplingConfigured( + ComposableIndexTemplate composableIndexTemplate, + Map componentTemplates + ) { + DataStreamLifecycle resolvedLifecycle = MetadataIndexTemplateService.resolveLifecycle(composableIndexTemplate, componentTemplates); + if (resolvedLifecycle != null && resolvedLifecycle.isEnabled() && resolvedLifecycle.getDownsamplingRounds() != null) { + throw new IllegalArgumentException("System data streams do not support downsampling as part of their lifecycle configuration"); + } + } + public String getDataStreamName() { return dataStreamName; } diff --git a/server/src/main/java/org/elasticsearch/indices/TermsLookup.java b/server/src/main/java/org/elasticsearch/indices/TermsLookup.java index b18cf552c6451..d819cdf128572 100644 --- a/server/src/main/java/org/elasticsearch/indices/TermsLookup.java +++ b/server/src/main/java/org/elasticsearch/indices/TermsLookup.java @@ -8,7 +8,7 @@ package org.elasticsearch.indices; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -56,7 +56,7 @@ public TermsLookup(String index, String id, String path) { * Read from a stream. 
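The validateNoDownsamplingConfigured call above runs at construction time, so an invalid system data stream descriptor can never exist. A stripped-down sketch of the same fail-fast pattern; the Lifecycle record here is a stand-in for the resolved DataStreamLifecycle, not the real class:

    // Stand-in for the resolved lifecycle, for illustration only.
    record Lifecycle(boolean enabled, Object downsamplingRounds) {}

    final class DescriptorSketch {
        DescriptorSketch(Lifecycle resolved) {
            // fail fast: reject unsupported configuration in the constructor
            if (resolved != null && resolved.enabled() && resolved.downsamplingRounds() != null) {
                throw new IllegalArgumentException("system data streams do not support downsampling");
            }
        }

        public static void main(String[] args) {
            new DescriptorSketch(new Lifecycle(true, null));         // ok
            new DescriptorSketch(new Lifecycle(true, new Object())); // throws
        }
    }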
*/ public TermsLookup(StreamInput in) throws IOException { - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { in.readOptionalString(); } id = in.readString(); @@ -67,7 +67,7 @@ public TermsLookup(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeOptionalString(MapperService.SINGLE_MAPPING_NAME); } out.writeString(id); diff --git a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java index a81be7fb037f8..d05a5ea377520 100644 --- a/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java +++ b/server/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; @@ -669,7 +670,7 @@ private void createShardWhenLockAvailable( // TODO could we instead subscribe to the shard lock and trigger the retry exactly when it is released rather than polling? threadPool.scheduleUnlessShuttingDown( shardLockRetryInterval, - ThreadPool.Names.SAME, + EsExecutors.DIRECT_EXECUTOR_SERVICE, () -> clusterService.getClusterApplierService() .runOnApplierThread("create shard " + shardRouting, Priority.NORMAL, currentState -> { diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index e2a0364c456a6..5f06519f84c0c 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -257,14 +257,14 @@ protected void retryRecovery(final long recoveryId, final String reason, TimeVal private void retryRecovery(final long recoveryId, final TimeValue retryAfter, final TimeValue activityTimeout) { RecoveryTarget newTarget = onGoingRecoveries.resetRecovery(recoveryId, activityTimeout); if (newTarget != null) { - threadPool.scheduleUnlessShuttingDown(retryAfter, ThreadPool.Names.GENERIC, new RecoveryRunner(newTarget.recoveryId())); + threadPool.scheduleUnlessShuttingDown(retryAfter, threadPool.generic(), new RecoveryRunner(newTarget.recoveryId())); } } protected void reestablishRecovery(final StartRecoveryRequest request, final String reason, TimeValue retryAfter) { final long recoveryId = request.recoveryId(); logger.trace("will try to reestablish recovery with id [{}] in [{}] (reason [{}])", recoveryId, retryAfter, reason); - threadPool.scheduleUnlessShuttingDown(retryAfter, ThreadPool.Names.GENERIC, new RecoveryRunner(recoveryId, request)); + threadPool.scheduleUnlessShuttingDown(retryAfter, threadPool.generic(), new RecoveryRunner(recoveryId, request)); } private void doRecovery(final long recoveryId, final StartRecoveryRequest preExistingRequest) { diff --git 
a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java index 54310659a39ff..cb73d104078dc 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveriesCollection.java @@ -82,7 +82,7 @@ private void startRecoveryInternal(RecoveryTarget recoveryTarget, TimeValue acti threadPool.schedule( new RecoveryMonitor(recoveryTarget.recoveryId(), recoveryTarget.lastAccessTime(), activityTimeout), activityTimeout, - ThreadPool.Names.GENERIC + threadPool.generic() ); } @@ -321,7 +321,7 @@ protected void doRun() throws Exception { } lastSeenAccessTime = accessTime; logger.trace("[monitor] rescheduling check for [{}]. last access time is [{}]", recoveryId, lastSeenAccessTime); - threadPool.schedule(this, checkInterval, ThreadPool.Names.GENERIC); + threadPool.schedule(this, checkInterval, threadPool.generic()); } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryResponse.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryResponse.java index f49d59d35e652..ab5397bf8ab02 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryResponse.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoveryResponse.java @@ -59,10 +59,10 @@ public final class RecoveryResponse extends TransportResponse { RecoveryResponse(StreamInput in) throws IOException { super(in); - phase1FileNames = in.readStringList(); - phase1FileSizes = in.readList(StreamInput::readVLong); - phase1ExistingFileNames = in.readStringList(); - phase1ExistingFileSizes = in.readList(StreamInput::readVLong); + phase1FileNames = in.readStringCollectionAsList(); + phase1FileSizes = in.readCollectionAsList(StreamInput::readVLong); + phase1ExistingFileNames = in.readStringCollectionAsList(); + phase1ExistingFileSizes = in.readCollectionAsList(StreamInput::readVLong); phase1TotalSize = in.readVLong(); phase1ExistingTotalSize = in.readVLong(); phase1Time = in.readVLong(); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java index 06bb809fdcaf9..1199e6ba5503f 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RecoverySettings.java @@ -13,6 +13,7 @@ import org.apache.lucene.store.RateLimiter; import org.apache.lucene.store.RateLimiter.SimpleRateLimiter; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; @@ -47,9 +48,9 @@ public class RecoverySettings { public static final Version SNAPSHOT_RECOVERIES_SUPPORTED_VERSION = Version.V_7_15_0; public static final IndexVersion SNAPSHOT_RECOVERIES_SUPPORTED_INDEX_VERSION = IndexVersion.V_7_15_0; - public static final TransportVersion SNAPSHOT_RECOVERIES_SUPPORTED_TRANSPORT_VERSION = TransportVersion.V_7_15_0; + public static final TransportVersion SNAPSHOT_RECOVERIES_SUPPORTED_TRANSPORT_VERSION = TransportVersions.V_7_15_0; public static final IndexVersion SEQ_NO_SNAPSHOT_RECOVERIES_SUPPORTED_VERSION = IndexVersion.V_7_16_0; - public static final TransportVersion SNAPSHOT_FILE_DOWNLOAD_THROTTLING_SUPPORTED_TRANSPORT_VERSION = 
TransportVersion.V_7_16_0; + public static final TransportVersion SNAPSHOT_FILE_DOWNLOAD_THROTTLING_SUPPORTED_TRANSPORT_VERSION = TransportVersions.V_7_16_0; private static final Logger logger = LogManager.getLogger(RecoverySettings.class); diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java index 74070e8673115..d04ed599d1e47 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/RemoteRecoveryTargetHandler.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.CancellableThreads; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.seqno.ReplicationTracker; @@ -334,7 +335,14 @@ private void executeRetryableAction( final ActionListener removeListener = ActionListener.runBefore(actionListener, () -> onGoingRetryableActions.remove(key)); final TimeValue initialDelay = TimeValue.timeValueMillis(200); final TimeValue timeout = recoverySettings.internalActionRetryTimeout(); - final RetryableAction retryableAction = new RetryableAction<>(logger, threadPool, initialDelay, timeout, removeListener) { + final RetryableAction retryableAction = new RetryableAction<>( + logger, + threadPool, + initialDelay, + timeout, + removeListener, + EsExecutors.DIRECT_EXECUTOR_SERVICE + ) { @Override public void tryAction(ActionListener listener) { diff --git a/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetadata.java b/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetadata.java index 9089e209d29b6..479b42fdf2e87 100644 --- a/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetadata.java +++ b/server/src/main/java/org/elasticsearch/indices/store/TransportNodesListShardStoreMetadata.java @@ -11,7 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; @@ -203,11 +203,11 @@ public record StoreFilesMetadata(Store.MetadataSnapshot metadataSnapshot, List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeStoreFilesMetadata::readListShardStoreNodeOperationResponse); + return in.readCollectionAsList(NodeStoreFilesMetadata::readListShardStoreNodeOperationResponse); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java index 676a347c5890c..25827018e44a7 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestDocument.java @@ -908,7 +908,7 @@ public boolean equals(Object obj) { } IngestDocument other = (IngestDocument) obj; - return 
Objects.equals(ctxMap, other.ctxMap) && Objects.equals(ingestMetadata, other.ingestMetadata); + return Objects.equals(ctxMap, other.ctxMap) && Maps.deepEquals(ingestMetadata, other.ingestMetadata); } @Override diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java b/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java index df7715a09c5c0..f3b778388809f 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java @@ -9,6 +9,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.NamedDiff; @@ -61,7 +62,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.MINIMUM_COMPATIBLE; + return TransportVersions.MINIMUM_COMPATIBLE; } public Map getPipelines() { @@ -148,7 +149,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.MINIMUM_COMPATIBLE; + return TransportVersions.MINIMUM_COMPATIBLE; } } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 6842343f130a3..0a34a03d38ea4 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -118,7 +118,7 @@ public class IngestService implements ClusterStateApplier, ReportingService createScheduler(ThreadPool threadPool) { - return (delay, command) -> threadPool.schedule(command, TimeValue.timeValueMillis(delay), ThreadPool.Names.GENERIC); + return (delay, command) -> threadPool.schedule(command, TimeValue.timeValueMillis(delay), threadPool.generic()); } public static MatcherWatchdog createGrokThreadWatchdog(Environment env, ThreadPool threadPool) { diff --git a/server/src/main/java/org/elasticsearch/monitor/fs/FsHealthService.java b/server/src/main/java/org/elasticsearch/monitor/fs/FsHealthService.java index b48c31d00a5f3..65c5087ed6064 100644 --- a/server/src/main/java/org/elasticsearch/monitor/fs/FsHealthService.java +++ b/server/src/main/java/org/elasticsearch/monitor/fs/FsHealthService.java @@ -95,7 +95,7 @@ public FsHealthService(Settings settings, ClusterSettings clusterSettings, Threa @Override protected void doStart() { - scheduledFuture = threadPool.scheduleWithFixedDelay(new FsHealthMonitor(), refreshInterval, ThreadPool.Names.GENERIC); + scheduledFuture = threadPool.scheduleWithFixedDelay(new FsHealthMonitor(), refreshInterval, threadPool.generic()); } @Override diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java index e73a9d70e5e5e..ef4b5fd7d6ee7 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmGcMonitorService.java @@ -15,11 +15,11 @@ import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.monitor.jvm.JvmStats.GarbageCollector; import 
org.elasticsearch.threadpool.Scheduler.Cancellable; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.threadpool.ThreadPool.Names; import java.util.HashMap; import java.util.Locale; @@ -224,7 +224,7 @@ void onSlowGc(final Threshold threshold, final long seq, final SlowGcEvent slowG void onGcOverhead(final Threshold threshold, final long current, final long elapsed, final long seq) { logGcOverhead(logger, threshold, current, elapsed, seq); } - }, interval, Names.SAME); + }, interval, EsExecutors.DIRECT_EXECUTOR_SERVICE); } private static final String SLOW_GC_LOG_MESSAGE = diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java index d4a15bea858e3..afeac8019b219 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmInfo.java @@ -9,7 +9,7 @@ package org.elasticsearch.monitor.jvm; import org.apache.lucene.util.Constants; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -268,7 +268,7 @@ public JvmInfo(StreamInput in) throws IOException { vmName = in.readString(); vmVersion = in.readString(); vmVendor = in.readString(); - if (in.getTransportVersion().before(TransportVersion.V_8_3_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_3_0)) { // Before 8.0 the no-jdk distributions could have bundledJdk false, this is always true now. in.readBoolean(); } @@ -302,7 +302,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(vmName); out.writeString(vmVersion); out.writeString(vmVendor); - if (out.getTransportVersion().before(TransportVersion.V_8_3_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_3_0)) { out.writeBoolean(true); } out.writeOptionalBoolean(usingBundledJdk); @@ -313,7 +313,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeString(bootClassPath); out.writeString(classPath); - out.writeMap(this.systemProperties, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(this.systemProperties, StreamOutput::writeString); mem.writeTo(out); out.writeStringArray(gcCollectors); out.writeStringArray(memoryPools); diff --git a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java index bc00bd668ce50..a78d93d2dca72 100644 --- a/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java +++ b/server/src/main/java/org/elasticsearch/monitor/jvm/JvmStats.java @@ -167,7 +167,7 @@ public JvmStats(StreamInput in) throws IOException { mem = new Mem(in); threads = new Threads(in); gc = new GarbageCollectors(in); - bufferPools = in.readList(BufferPool::new); + bufferPools = in.readCollectionAsList(BufferPool::new); classes = new Classes(in); } @@ -178,7 +178,7 @@ public void writeTo(StreamOutput out) throws IOException { mem.writeTo(out); threads.writeTo(out); gc.writeTo(out); - out.writeList(bufferPools); + out.writeCollection(bufferPools); classes.writeTo(out); } @@ -506,7 +506,7 @@ public Mem(StreamInput in) throws IOException { nonHeapCommitted = in.readVLong(); nonHeapUsed = in.readVLong(); heapMax = in.readVLong(); - pools = in.readList(MemoryPool::new); + pools = in.readCollectionAsList(MemoryPool::new); } @Override @@ -516,7 +516,7 @@ 
public void writeTo(StreamOutput out) throws IOException { out.writeVLong(nonHeapCommitted); out.writeVLong(nonHeapUsed); out.writeVLong(heapMax); - out.writeList(pools); + out.writeCollection(pools); } @Override diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsInfo.java b/server/src/main/java/org/elasticsearch/monitor/os/OsInfo.java index d956ee4f6852c..9f0f0c6ed53f8 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsInfo.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsInfo.java @@ -9,6 +9,7 @@ package org.elasticsearch.monitor.os; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.unit.Processors; @@ -19,7 +20,7 @@ import java.io.IOException; public class OsInfo implements ReportingService.Info { - private static final TransportVersion DOUBLE_PRECISION_ALLOCATED_PROCESSORS_SUPPORT = TransportVersion.V_8_5_0; + private static final TransportVersion DOUBLE_PRECISION_ALLOCATED_PROCESSORS_SUPPORT = TransportVersions.V_8_5_0; private final long refreshInterval; private final int availableProcessors; diff --git a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java index dab3eb100e9fb..f4ca89c8604bf 100644 --- a/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java +++ b/server/src/main/java/org/elasticsearch/monitor/os/OsStats.java @@ -10,7 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -276,7 +276,7 @@ public Mem(StreamInput in) throws IOException { total = 0; } this.total = total; - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { long adjustedTotal = in.readLong(); assert adjustedTotal >= 0 : "expected adjusted total memory to be positive, got: " + adjustedTotal; if (adjustedTotal < 0) { @@ -299,7 +299,7 @@ public Mem(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeLong(total); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { out.writeLong(adjustedTotal); } out.writeLong(free); diff --git a/server/src/main/java/org/elasticsearch/node/AdaptiveSelectionStats.java b/server/src/main/java/org/elasticsearch/node/AdaptiveSelectionStats.java index 0f54d7ce43b61..2e57ed91bdcc5 100644 --- a/server/src/main/java/org/elasticsearch/node/AdaptiveSelectionStats.java +++ b/server/src/main/java/org/elasticsearch/node/AdaptiveSelectionStats.java @@ -48,8 +48,8 @@ public AdaptiveSelectionStats(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(this.clientOutgoingConnections, StreamOutput::writeString, StreamOutput::writeLong); - out.writeMap(this.nodeComputedStats, StreamOutput::writeString, (stream, stats) -> stats.writeTo(stream)); + out.writeMap(this.clientOutgoingConnections, StreamOutput::writeLong); + out.writeMap(this.nodeComputedStats, StreamOutput::writeWriteable); } @Override diff 
--git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 2e27a6b36c07d..da0fb6064917c 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -16,6 +16,7 @@ import org.elasticsearch.Build; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; +import org.elasticsearch.TransportVersion; import org.elasticsearch.Version; import org.elasticsearch.action.ActionModule; import org.elasticsearch.action.ActionRequest; @@ -66,6 +67,7 @@ import org.elasticsearch.cluster.routing.allocation.WriteLoadForecaster; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.TransportVersionsFixupListener; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.StopWatch; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.component.Lifecycle; @@ -108,7 +110,6 @@ import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.gateway.MetaStateService; import org.elasticsearch.gateway.PersistedClusterStateService; -import org.elasticsearch.health.HealthIndicatorService; import org.elasticsearch.health.HealthPeriodicLogger; import org.elasticsearch.health.HealthService; import org.elasticsearch.health.metadata.HealthMetadataService; @@ -926,6 +927,7 @@ protected Node( ); clusterInfoService.addListener(diskThresholdMonitor::onNewInfo); + CompatibilityVersions compatibilityVersions = new CompatibilityVersions(TransportVersion.current()); final DiscoveryModule discoveryModule = new DiscoveryModule( settings, transportService, @@ -942,7 +944,8 @@ protected Node( gatewayMetaState, rerouteService, fsHealthService, - circuitBreakerService + circuitBreakerService, + compatibilityVersions ); this.nodeService = new NodeService( settings, @@ -1027,14 +1030,14 @@ protected Node( clusterService.getClusterSettings() ); - MasterHistoryService masterHistoryService = new MasterHistoryService(transportService, threadPool, clusterService); - CoordinationDiagnosticsService coordinationDiagnosticsService = new CoordinationDiagnosticsService( + final MasterHistoryService masterHistoryService = new MasterHistoryService(transportService, threadPool, clusterService); + final CoordinationDiagnosticsService coordinationDiagnosticsService = new CoordinationDiagnosticsService( clusterService, transportService, discoveryModule.getCoordinator(), masterHistoryService ); - HealthService healthService = createHealthService( + final HealthService healthService = createHealthService( clusterService, clusterModule, coordinationDiagnosticsService, @@ -1152,10 +1155,11 @@ protected Node( b.bind(FileSettingsService.class).toInstance(fileSettingsService); b.bind(WriteLoadForecaster.class).toInstance(writeLoadForecaster); b.bind(HealthPeriodicLogger.class).toInstance(healthPeriodicLogger); + b.bind(CompatibilityVersions.class).toInstance(compatibilityVersions); }); if (ReadinessService.enabled(environment)) { - modules.add(b -> b.bind(ReadinessService.class).toInstance(new ReadinessService(clusterService, environment))); + modules.add(b -> b.bind(ReadinessService.class).toInstance(newReadinessService(clusterService, environment))); } injector = modules.createInjector(); @@ -1289,26 +1293,18 @@ private HealthService createHealthService( ThreadPool threadPool, SystemIndices systemIndices ) { - List preflightHealthIndicatorServices = 
Collections.singletonList( - new StableMasterHealthIndicatorService(coordinationDiagnosticsService, clusterService) - ); - var serverHealthIndicatorServices = new ArrayList<>( - List.of( - new RepositoryIntegrityHealthIndicatorService(clusterService), - new ShardsAvailabilityHealthIndicatorService(clusterService, clusterModule.getAllocationService(), systemIndices) - ) + var serverHealthIndicatorServices = List.of( + new StableMasterHealthIndicatorService(coordinationDiagnosticsService, clusterService), + new RepositoryIntegrityHealthIndicatorService(clusterService), + new ShardsAvailabilityHealthIndicatorService(clusterService, clusterModule.getAllocationService(), systemIndices), + new DiskHealthIndicatorService(clusterService), + new ShardsCapacityHealthIndicatorService(clusterService) ); - serverHealthIndicatorServices.add(new DiskHealthIndicatorService(clusterService)); - serverHealthIndicatorServices.add(new ShardsCapacityHealthIndicatorService(clusterService)); var pluginHealthIndicatorServices = pluginsService.filterPlugins(HealthPlugin.class) .stream() .flatMap(plugin -> plugin.getHealthIndicatorServices().stream()) .toList(); - return new HealthService( - preflightHealthIndicatorServices, - concatLists(serverHealthIndicatorServices, pluginHealthIndicatorServices), - threadPool - ); + return new HealthService(concatLists(serverHealthIndicatorServices, pluginHealthIndicatorServices), threadPool); } private HealthPeriodicLogger createHealthPeriodicLogger( @@ -1483,7 +1479,8 @@ public Node start() throws NodeValidationException { injector.getInstance(IndexMetadataVerifier.class), injector.getInstance(MetadataUpgrader.class), injector.getInstance(PersistedClusterStateService.class), - pluginsService.filterPlugins(ClusterCoordinationPlugin.class) + pluginsService.filterPlugins(ClusterCoordinationPlugin.class), + injector.getInstance(CompatibilityVersions.class) ); // TODO: Do not expect that the legacy metadata file is always present https://github.com/elastic/elasticsearch/issues/95211 if (Assertions.ENABLED && DiscoveryNode.isStateless(settings()) == false) { @@ -1911,7 +1908,7 @@ PageCacheRecycler createPageCacheRecycler(Settings settings) { } /** - * Creates a new the SearchService. This method can be overwritten by tests to inject mock implementations. + * Creates a new SearchService. This method can be overwritten by tests to inject mock implementations. */ protected SearchService newSearchService( ClusterService clusterService, @@ -1940,7 +1937,7 @@ protected SearchService newSearchService( } /** - * Creates a new the ScriptService. This method can be overwritten by tests to inject mock implementations. + * Creates a new ScriptService. This method can be overwritten by tests to inject mock implementations. */ protected ScriptService newScriptService( Settings settings, @@ -1951,6 +1948,13 @@ protected ScriptService newScriptService( return new ScriptService(settings, engines, contexts, timeProvider); } + /** + * Creates a new ReadinessService. This method can be overwritten by tests to inject mock implementations. 
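newReadinessService follows the same test seam as the existing newSearchService and newScriptService factories in Node: production wiring calls a protected factory method, and a test subclass overrides it to return a stub. A minimal sketch of the pattern; the names are illustrative, not the real Node API:

    // Sketch: a protected factory method as a test seam.
    class ServerSketch {
        protected String newReadinessService() {
            return "production readiness service";
        }

        String describeReadiness() {
            return newReadinessService(); // production wiring calls the factory
        }
    }

    final class TestServerSketch extends ServerSketch {
        @Override
        protected String newReadinessService() {
            return "mock readiness service"; // test double injected by overriding
        }

        public static void main(String[] args) {
            System.out.println(new ServerSketch().describeReadiness());
            System.out.println(new TestServerSketch().describeReadiness());
        }
    }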
+ */ + protected ReadinessService newReadinessService(ClusterService clusterService, Environment environment) { + return new ReadinessService(clusterService, environment); + } + /** * Get Custom Name Resolvers list based on a Discovery Plugins list * diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java index e2ca66f3835a9..e2d7d0c8366d8 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java @@ -26,6 +26,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractAsyncTask; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; @@ -524,7 +525,7 @@ private static Assignment unassignedAssignment(String reason) { class PeriodicRechecker extends AbstractAsyncTask { PeriodicRechecker(TimeValue recheckInterval) { - super(logger, threadPool, recheckInterval, false); + super(logger, threadPool, EsExecutors.DIRECT_EXECUTOR_SERVICE, recheckInterval, false); } @Override @@ -535,6 +536,7 @@ protected boolean mustReschedule() { @Override public void runInternal() { if (clusterService.localNode().isMasterNode()) { + // TODO just run on the elected master? final ClusterState state = clusterService.state(); logger.trace("periodic persistent task assignment check running for cluster state {}", state.getVersion()); if (isAnyTaskUnassigned(state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE))) { diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetadata.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetadata.java index 1ce3ab6633faf..5fdac777b5a75 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetadata.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetadata.java @@ -10,6 +10,7 @@ import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NamedDiff; @@ -199,7 +200,7 @@ public long getNumberOfTasksOnNode(String nodeId, String taskName) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.MINIMUM_COMPATIBLE; + return TransportVersions.MINIMUM_COMPATIBLE; } @Override @@ -542,7 +543,7 @@ public void writeTo(StreamOutput out) throws IOException { .stream() .filter(t -> VersionedNamedWriteable.shouldSerialize(out, t.getParams())) .collect(Collectors.toMap(PersistentTask::getId, Function.identity())); - out.writeMap(filteredTasks, StreamOutput::writeString, (stream, value) -> value.writeTo(stream)); + out.writeMap(filteredTasks, StreamOutput::writeWriteable); } public static NamedDiff readDiffFrom(StreamInput in) throws IOException { diff --git a/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java b/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java index 207920cd60ac9..095eec2811edc 100644 --- 
a/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/ActionPlugin.java @@ -52,14 +52,6 @@ public interface ActionPlugin { return Collections.emptyList(); } - /** - * Client actions added by this plugin. This defaults to all of the {@linkplain ActionType} in - * {@linkplain ActionPlugin#getActions()}. - */ - default List> getClientActions() { - return getActions().stream().>map(a -> a.action).toList(); - } - /** * ActionType filters added by this plugin. */ diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginApiInfo.java b/server/src/main/java/org/elasticsearch/plugins/PluginApiInfo.java index 1e1ac79320ed4..fe8511ddb7623 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginApiInfo.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginApiInfo.java @@ -26,7 +26,7 @@ public record PluginApiInfo(List legacyInterfaces, List legacyMethods) implements Writeable, ToXContentFragment { public PluginApiInfo(StreamInput in) throws IOException { - this(in.readImmutableList(StreamInput::readString), in.readImmutableList(StreamInput::readString)); + this(in.readCollectionAsImmutableList(StreamInput::readString), in.readCollectionAsImmutableList(StreamInput::readString)); } @Override diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java b/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java index dd56e18957318..46028ad36b66c 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginDescriptor.java @@ -9,6 +9,7 @@ package org.elasticsearch.plugins; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -46,9 +47,9 @@ public class PluginDescriptor implements Writeable, ToXContentObject { public static final String ES_PLUGIN_POLICY = "plugin-security.policy"; - private static final TransportVersion LICENSED_PLUGINS_SUPPORT = TransportVersion.V_7_11_0; - private static final TransportVersion MODULE_NAME_SUPPORT = TransportVersion.V_8_3_0; - private static final TransportVersion BOOTSTRAP_SUPPORT_REMOVED = TransportVersion.V_8_4_0; + private static final TransportVersion LICENSED_PLUGINS_SUPPORT = TransportVersions.V_7_11_0; + private static final TransportVersion MODULE_NAME_SUPPORT = TransportVersions.V_8_3_0; + private static final TransportVersion BOOTSTRAP_SUPPORT_REMOVED = TransportVersions.V_8_4_0; private final String name; private final String description; @@ -124,7 +125,7 @@ public PluginDescriptor(final StreamInput in) throws IOException { } else { this.moduleName = null; } - extendedPlugins = in.readStringList(); + extendedPlugins = in.readStringCollectionAsList(); hasNativeController = in.readBoolean(); if (in.getTransportVersion().onOrAfter(LICENSED_PLUGINS_SUPPORT)) { @@ -137,7 +138,7 @@ public PluginDescriptor(final StreamInput in) throws IOException { isLicensed = false; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { isModular = in.readBoolean(); isStable = in.readBoolean(); } else { @@ -167,7 +168,7 @@ public void writeTo(final StreamOutput out) throws IOException { } out.writeBoolean(isLicensed); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if 
(out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { out.writeBoolean(isModular); out.writeBoolean(isStable); } diff --git a/server/src/main/java/org/elasticsearch/plugins/PluginRuntimeInfo.java b/server/src/main/java/org/elasticsearch/plugins/PluginRuntimeInfo.java index f58f14bcd7a77..87ef68c91f363 100644 --- a/server/src/main/java/org/elasticsearch/plugins/PluginRuntimeInfo.java +++ b/server/src/main/java/org/elasticsearch/plugins/PluginRuntimeInfo.java @@ -8,7 +8,7 @@ package org.elasticsearch.plugins; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -40,7 +40,7 @@ public PluginRuntimeInfo(StreamInput in) throws IOException { } private static Boolean readIsOfficial(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { return in.readBoolean(); } else { return null; @@ -48,7 +48,7 @@ private static Boolean readIsOfficial(StreamInput in) throws IOException { } private static PluginApiInfo readApiInfo(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { return in.readOptionalWriteable(PluginApiInfo::new); } else { return null; @@ -72,7 +72,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { descriptor.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { out.writeBoolean(isOfficial); out.writeOptionalWriteable(pluginApiInfo); } diff --git a/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java b/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java index bcd82b18470ca..e985e279770e9 100644 --- a/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/SearchPlugin.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.Query; import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.io.stream.GenericNamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -147,6 +148,13 @@ default List> getRescorers() { return emptyList(); } + /** + * Additional GenericNamedWriteable classes added by this plugin. + */ + default List getGenericNamedWriteables() { + return emptyList(); + } + /** * Allows plugins to register a cache differentiator which contributes to the cacheKey * computation for the request cache. This helps differentiate between queries that @@ -634,4 +642,9 @@ public QueryVectorBuilderSpec(String name, Writeable.Reader reader, BiFunctio super(name, reader, parser); } } + + /** + * Specification of GenericNamedWriteable classes that can be serialized/deserialized as generic objects in search results. 
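Stepping back to the V_8_3_0 / V_8_4_0 checks in PluginDescriptor and PluginRuntimeInfo above: they are the standard transport-version gate, where a newer field is written and read only when the peer's wire version is recent enough, keeping mixed-version clusters compatible. A minimal sketch of the gate; the version constant and field are illustrative, and plain data streams stand in for StreamInput/StreamOutput:

    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    // Sketch: write/read a field only when the peer's wire version supports it.
    final class VersionGatedSketch {
        static final int V_NEW = 8_030_099; // hypothetical version that introduced the field

        static void write(DataOutputStream out, int peerVersion, boolean isOfficial) throws IOException {
            if (peerVersion >= V_NEW) {
                out.writeBoolean(isOfficial); // newer peers understand the field
            } // older peers receive nothing extra and stay compatible
        }

        static Boolean read(DataInputStream in, int peerVersion) throws IOException {
            return peerVersion >= V_NEW ? in.readBoolean() : null; // null when the peer predates it
        }
    }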
+ */ + record GenericNamedWriteableSpec(String name, Writeable.Reader reader) {} } diff --git a/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java b/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java index 80ac090f2539d..774d47b583686 100644 --- a/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java +++ b/server/src/main/java/org/elasticsearch/readiness/ReadinessService.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.CheckedSupplier; import org.elasticsearch.common.component.AbstractLifecycleComponent; import org.elasticsearch.common.network.NetworkAddress; import org.elasticsearch.common.settings.Setting; @@ -41,6 +42,7 @@ public class ReadinessService extends AbstractLifecycleComponent implements Clus private static final Logger logger = LogManager.getLogger(ReadinessService.class); private final Environment environment; + private final CheckedSupplier socketChannelFactory; private volatile boolean active; // false; private volatile ServerSocketChannel serverChannel; @@ -55,8 +57,18 @@ public class ReadinessService extends AbstractLifecycleComponent implements Clus public static final Setting PORT = Setting.intSetting("readiness.port", -1, Setting.Property.NodeScope); public ReadinessService(ClusterService clusterService, Environment environment) { + this(clusterService, environment, ServerSocketChannel::open); + } + + // package private to enable mocking (for testing) + ReadinessService( + ClusterService clusterService, + Environment environment, + CheckedSupplier socketChannelFactory + ) { this.serverChannel = null; this.environment = environment; + this.socketChannelFactory = socketChannelFactory; clusterService.addListener(this); } @@ -119,7 +131,7 @@ ServerSocketChannel setupSocket() { }); try { - serverChannel = ServerSocketChannel.open(); + serverChannel = socketChannelFactory.get(); AccessController.doPrivileged((PrivilegedAction) () -> { try { diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesStats.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesStats.java index 6a13053ec9279..722779a646824 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesStats.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.repositories; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -28,7 +28,7 @@ public class RepositoriesStats implements Writeable, ToXContentFragment { private final Map repositoryThrottlingStats; public RepositoriesStats(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_011)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { repositoryThrottlingStats = in.readMap(ThrottlingStats::new); } else { repositoryThrottlingStats = new HashMap<>(); @@ -41,8 +41,8 @@ public RepositoriesStats(Map repositoryThrottlingStats) @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_011)) { - out.writeMap(repositoryThrottlingStats, StreamOutput::writeString, (o, v) -> 
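// Editorial aside, not part of the diff: the package-private ReadinessService constructor
// above exists purely so tests can inject the channel factory. A minimal sketch, assuming
// clusterService and environment are test fixtures; the lambda satisfies
// CheckedSupplier<ServerSocketChannel, IOException>:
ReadinessService service = new ReadinessService(clusterService, environment, () -> {
    throw new IOException("simulated failure opening the readiness socket");
});
// setupSocket() can now be driven down its error path without binding a real port.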
v.writeTo(o)); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + out.writeMap(repositoryThrottlingStats, StreamOutput::writeWriteable); } } diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java index f9f08324c1603..df657abd55152 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java @@ -307,7 +307,7 @@ public SnapshotState getSnapshotState(final SnapshotId snapshotId) { } /** - * Returns the {@link Version} for the given snapshot or {@code null} if unknown. + * Returns the {@link IndexVersion} for the given snapshot or {@code null} if unknown. */ @Nullable public IndexVersion getVersion(SnapshotId snapshotId) { diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoryInfo.java b/server/src/main/java/org/elasticsearch/repositories/RepositoryInfo.java index fe3251a10d7fb..c6cebdbc9a6fb 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoryInfo.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoryInfo.java @@ -76,7 +76,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(ephemeralId); out.writeString(name); out.writeString(type); - out.writeMap(location, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(location, StreamOutput::writeString); out.writeLong(startedAt); out.writeOptionalLong(stoppedAt); } diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoryStats.java b/server/src/main/java/org/elasticsearch/repositories/RepositoryStats.java index b991b3954edfc..5a3f1634beab1 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoryStats.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoryStats.java @@ -43,7 +43,7 @@ public RepositoryStats merge(RepositoryStats otherStats) { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(requestCounts, StreamOutput::writeString, StreamOutput::writeLong); + out.writeMap(requestCounts, StreamOutput::writeLong); } @Override diff --git a/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java b/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java index e4868a3937e96..e34830dc3b355 100644 --- a/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java +++ b/server/src/main/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsAction.java @@ -12,13 +12,13 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsRequest; import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsUpdater; import org.elasticsearch.reservedstate.ReservedClusterStateHandler; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.xcontent.XContentParser; import java.io.IOException; -import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; @@ -49,21 +49,17 @@ public String name() { @SuppressWarnings("unchecked") private ClusterUpdateSettingsRequest prepare(Object input, Set previouslySet) { - final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = new ClusterUpdateSettingsRequest(); + // load the 
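// Editorial aside, not part of the diff: the writeMap call sites in these hunks drop the
// explicit key writer because the remaining overload writes String keys itself. For a
// Map<String, Long> such as requestCounts, the two forms are equivalent:
out.writeMap(requestCounts, StreamOutput::writeString, StreamOutput::writeLong); // old: key writer spelled out
out.writeMap(requestCounts, StreamOutput::writeLong);                            // new: String keys implied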
new settings into a builder so their paths are normalized + @SuppressWarnings("unchecked") + Settings.Builder newSettings = Settings.builder().loadFromMap((Map) input); - Map persistentSettings = new HashMap<>(); + // now the new and old settings can be compared to find which are missing for deletion Set toDelete = new HashSet<>(previouslySet); + toDelete.removeAll(newSettings.keys()); + toDelete.forEach(k -> newSettings.put(k, (String) null)); - Map settings = (Map) input; - - settings.forEach((k, v) -> { - persistentSettings.put(k, v); - toDelete.remove(k); - }); - - toDelete.forEach(k -> persistentSettings.put(k, null)); - - clusterUpdateSettingsRequest.persistentSettings(persistentSettings); + final ClusterUpdateSettingsRequest clusterUpdateSettingsRequest = new ClusterUpdateSettingsRequest(); + clusterUpdateSettingsRequest.persistentSettings(newSettings); return clusterUpdateSettingsRequest; } diff --git a/server/src/main/java/org/elasticsearch/script/CtxMap.java b/server/src/main/java/org/elasticsearch/script/CtxMap.java index 71c8ebea3fd36..a379ba3db4faf 100644 --- a/server/src/main/java/org/elasticsearch/script/CtxMap.java +++ b/server/src/main/java/org/elasticsearch/script/CtxMap.java @@ -8,6 +8,7 @@ package org.elasticsearch.script; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.set.Sets; import java.util.AbstractCollection; @@ -336,9 +337,9 @@ public Object setValue(Object value) { public boolean equals(Object o) { if (this == o) return true; if ((o instanceof CtxMap) == false) return false; - if (super.equals(o) == false) return false; CtxMap ctxMap = (CtxMap) o; - return source.equals(ctxMap.source) && metadata.equals(ctxMap.metadata); + if (Maps.deepEquals(this, ctxMap) == false) return false; + return Maps.deepEquals(source, ctxMap.source) && metadata.equals(ctxMap.metadata); } @Override diff --git a/server/src/main/java/org/elasticsearch/script/ScriptContextStats.java b/server/src/main/java/org/elasticsearch/script/ScriptContextStats.java index 8b947e5034168..7af41d922d8df 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptContextStats.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptContextStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.script; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -60,10 +60,10 @@ public static ScriptContextStats read(StreamInput in) throws IOException { var compilationLimitTriggered = in.readVLong(); TimeSeries compilationsHistory; TimeSeries cacheEvictionsHistory; - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { compilationsHistory = new TimeSeries(in); cacheEvictionsHistory = new TimeSeries(in); - } else if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + } else if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { compilationsHistory = new TimeSeries(in).withTotal(compilations); cacheEvictionsHistory = new TimeSeries(in).withTotal(cacheEvictions); } else { @@ -98,7 +98,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(compilations); out.writeVLong(cacheEvictions); out.writeVLong(compilationLimitTriggered); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if 
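// Editorial aside, not part of the diff: a worked example of the deletion logic in
// prepare() above, using two real cluster settings. If the previous reserved-state file
// set both keys and the new file keeps only one, the dropped key is sent as null so the
// cluster-settings update clears it:
Set<String> previouslySet = Set.of("indices.recovery.max_bytes_per_sec", "cluster.routing.allocation.enable");
Set<String> newKeys = Set.of("cluster.routing.allocation.enable");
Set<String> toDelete = new HashSet<>(previouslySet);
toDelete.removeAll(newKeys); // leaves indices.recovery.max_bytes_per_sec, which prepare() maps to null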
(out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { compilationsHistory.writeTo(out); cacheEvictionsHistory.writeTo(out); } diff --git a/server/src/main/java/org/elasticsearch/script/ScriptException.java b/server/src/main/java/org/elasticsearch/script/ScriptException.java index f7136f0ec4333..4b57b90624acb 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptException.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptException.java @@ -9,7 +9,7 @@ package org.elasticsearch.script; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -78,7 +78,7 @@ public ScriptException(StreamInput in) throws IOException { scriptStack = Arrays.asList(in.readStringArray()); script = in.readString(); lang = in.readString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0) && in.readBoolean()) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_7_0) && in.readBoolean()) { pos = new Position(in); } else { pos = null; @@ -88,10 +88,10 @@ public ScriptException(StreamInput in) throws IOException { @Override protected void writeTo(StreamOutput out, Writer nestedExceptionsWriter) throws IOException { super.writeTo(out, nestedExceptionsWriter); - out.writeStringArray(scriptStack.toArray(new String[0])); + out.writeStringCollection(scriptStack); out.writeString(script); out.writeString(lang); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_7_0)) { if (pos == null) { out.writeBoolean(false); } else { diff --git a/server/src/main/java/org/elasticsearch/script/ScriptLanguagesInfo.java b/server/src/main/java/org/elasticsearch/script/ScriptLanguagesInfo.java index 8993188c36c24..7b3ea4fbe4581 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptLanguagesInfo.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptLanguagesInfo.java @@ -82,8 +82,8 @@ public ScriptLanguagesInfo(Set typesAllowed, Map> la } public ScriptLanguagesInfo(StreamInput in) throws IOException { - typesAllowed = in.readImmutableSet(StreamInput::readString); - languageContexts = in.readImmutableMap(sin -> sin.readImmutableSet(StreamInput::readString)); + typesAllowed = in.readCollectionAsImmutableSet(StreamInput::readString); + languageContexts = in.readImmutableMap(sin -> sin.readCollectionAsImmutableSet(StreamInput::readString)); } @SuppressWarnings("unchecked") @@ -114,7 +114,7 @@ public static ScriptLanguagesInfo fromXContent(XContentParser parser) throws IOE @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(typesAllowed); - out.writeMap(languageContexts, StreamOutput::writeString, StreamOutput::writeStringCollection); + out.writeMap(languageContexts, StreamOutput::writeStringCollection); } @Override diff --git a/server/src/main/java/org/elasticsearch/script/ScriptMetadata.java b/server/src/main/java/org/elasticsearch/script/ScriptMetadata.java index 6c7604bc2aef8..bf076692133a5 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptMetadata.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptMetadata.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.TransportVersion; +import 
org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.DiffableUtils; @@ -129,7 +130,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.MINIMUM_COMPATIBLE; + return TransportVersions.MINIMUM_COMPATIBLE; } } @@ -258,7 +259,7 @@ public ScriptMetadata(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(scripts, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + out.writeMap(scripts, StreamOutput::writeWriteable); } @Override @@ -281,7 +282,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.MINIMUM_COMPATIBLE; + return TransportVersions.MINIMUM_COMPATIBLE; } @Override diff --git a/server/src/main/java/org/elasticsearch/script/ScriptStats.java b/server/src/main/java/org/elasticsearch/script/ScriptStats.java index 3b21d8746e5f5..e45f75124e923 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptStats.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.script; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -128,7 +128,7 @@ public static ScriptStats read(StreamInput in) throws IOException { TimeSeries cacheEvictionsHistory; long compilations; long cacheEvictions; - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { compilationsHistory = new TimeSeries(in); cacheEvictionsHistory = new TimeSeries(in); compilations = compilationsHistory.total; @@ -140,7 +140,7 @@ public static ScriptStats read(StreamInput in) throws IOException { cacheEvictionsHistory = new TimeSeries(cacheEvictions); } var compilationLimitTriggered = in.readVLong(); - var contextStats = in.readList(ScriptContextStats::read); + var contextStats = in.readCollectionAsList(ScriptContextStats::read); return new ScriptStats( contextStats, compilations, @@ -153,7 +153,7 @@ public static ScriptStats read(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { compilationsHistory.writeTo(out); cacheEvictionsHistory.writeTo(out); } else { @@ -161,7 +161,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(cacheEvictions); } out.writeVLong(compilationLimitTriggered); - out.writeList(contextStats); + out.writeCollection(contextStats); } public List getContextStats() { diff --git a/server/src/main/java/org/elasticsearch/script/TimeSeries.java b/server/src/main/java/org/elasticsearch/script/TimeSeries.java index 2c541fef08e0c..38114b8089180 100644 --- a/server/src/main/java/org/elasticsearch/script/TimeSeries.java +++ b/server/src/main/java/org/elasticsearch/script/TimeSeries.java @@ -8,7 +8,7 @@ package org.elasticsearch.script; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import 
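// Editorial aside, not part of the diff: the StreamInput/StreamOutput renames applied
// mechanically throughout this change (all visible in the surrounding hunks):
//   in.readList(r)                 -> in.readCollectionAsList(r)
//   in.readStringList()            -> in.readStringCollectionAsList()
//   in.readSet(r)                  -> in.readCollectionAsSet(r)
//   in.readImmutableList(r)        -> in.readCollectionAsImmutableList(r)
//   in.readImmutableSet(r)         -> in.readCollectionAsImmutableSet(r)
//   out.writeList(c)               -> out.writeCollection(c)
//   out.writeNamedWriteableList(c) -> out.writeNamedWriteableCollection(c)
//   out.writeStringArray(c.toArray(new String[0])) -> out.writeStringCollection(c)
//   (o, v) -> v.writeTo(o)         -> StreamOutput::writeWriteable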
org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -58,7 +58,7 @@ public TimeSeries(StreamInput in) throws IOException { fiveMinutes = in.readVLong(); fifteenMinutes = in.readVLong(); twentyFourHours = in.readVLong(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { total = in.readVLong(); } else { total = 0; @@ -79,7 +79,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(fiveMinutes); out.writeVLong(fifteenMinutes); out.writeVLong(twentyFourHours); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { out.writeVLong(total); } } diff --git a/server/src/main/java/org/elasticsearch/search/CanMatchShardResponse.java b/server/src/main/java/org/elasticsearch/search/CanMatchShardResponse.java index d432a5b4f89d9..a42fa9d8eec36 100644 --- a/server/src/main/java/org/elasticsearch/search/CanMatchShardResponse.java +++ b/server/src/main/java/org/elasticsearch/search/CanMatchShardResponse.java @@ -8,7 +8,7 @@ package org.elasticsearch.search; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.sort.MinAndMax; @@ -25,7 +25,7 @@ public final class CanMatchShardResponse extends SearchPhaseResult { public CanMatchShardResponse(StreamInput in) throws IOException { super(in); this.canMatch = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) { estimatedMinAndMax = in.readOptionalWriteable(MinAndMax::new); } else { estimatedMinAndMax = null; @@ -40,7 +40,7 @@ public CanMatchShardResponse(boolean canMatch, MinAndMax estimatedMinAndMax) @Override public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(canMatch); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) { out.writeOptionalWriteable(estimatedMinAndMax); } } diff --git a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java index 651bcfbef045d..013c3587e84f5 100644 --- a/server/src/main/java/org/elasticsearch/search/DocValueFormat.java +++ b/server/src/main/java/org/elasticsearch/search/DocValueFormat.java @@ -10,7 +10,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; @@ -241,14 +241,14 @@ public DateTime(StreamInput in) throws IOException { this.formatter = DateFormatter.forPattern(formatterPattern).withZone(this.timeZone); this.parser = formatter.toDateMathParser(); this.resolution = DateFieldMapper.Resolution.ofOrdinal(in.readVInt()); - if (in.getTransportVersion().between(TransportVersion.V_7_7_0, TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().between(TransportVersions.V_7_7_0, TransportVersions.V_8_0_0)) { /* when deserialising from 7.7+ nodes expect a flag indicating if a pattern is of joda style This is only used 
to support joda style indices in 7.x, in 8 we no longer support this. All indices in 8 should use java style pattern. Hence we can ignore this flag. */ in.readBoolean(); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_13_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_13_0)) { this.formatSortValues = in.readBoolean(); } else { this.formatSortValues = false; @@ -265,14 +265,14 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(formatter.pattern()); out.writeString(timeZone.getId()); out.writeVInt(resolution.ordinal()); - if (out.getTransportVersion().between(TransportVersion.V_7_7_0, TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().between(TransportVersions.V_7_7_0, TransportVersions.V_8_0_0)) { /* when serializing to 7.7+ send out a flag indicating if a pattern is of joda style This is only used to support joda style indices in 7.x, in 8 we no longer support this. All indices in 8 should use java style pattern. Hence this flag is always false. */ out.writeBoolean(false); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_13_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_13_0)) { out.writeBoolean(formatSortValues); } } diff --git a/server/src/main/java/org/elasticsearch/search/RescoreDocIds.java b/server/src/main/java/org/elasticsearch/search/RescoreDocIds.java index b0ecbae2e7fbf..df623e5d48a6d 100644 --- a/server/src/main/java/org/elasticsearch/search/RescoreDocIds.java +++ b/server/src/main/java/org/elasticsearch/search/RescoreDocIds.java @@ -31,7 +31,7 @@ public RescoreDocIds(Map> docIds) { } public RescoreDocIds(StreamInput in) throws IOException { - docIds = in.readMap(StreamInput::readVInt, i -> i.readSet(StreamInput::readVInt)); + docIds = in.readMap(StreamInput::readVInt, i -> i.readCollectionAsSet(StreamInput::readVInt)); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/SearchExtBuilder.java b/server/src/main/java/org/elasticsearch/search/SearchExtBuilder.java index 76dc2cb69122f..26dc0027e3840 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchExtBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/SearchExtBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.VersionedNamedWriteable; @@ -43,6 +44,6 @@ public abstract class SearchExtBuilder implements VersionedNamedWriteable, ToXCo @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/SearchHit.java b/server/src/main/java/org/elasticsearch/search/SearchHit.java index 3383227a5b949..9fa99bb4a773f 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchHit.java +++ b/server/src/main/java/org/elasticsearch/search/SearchHit.java @@ -10,7 +10,7 @@ import org.apache.lucene.search.Explanation; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -138,11 +138,11 @@ public SearchHit(int nestedTopDocId, String id, NestedIdentity nestedIdentity) { public 
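// Editorial aside, not part of the diff: the joda-style flag above travels only for
// streams in the half-open range [V_7_7_0, V_8_0_0); between(lower, upper) appears to be
// lower-inclusive and upper-exclusive, i.e. equivalent to:
boolean inRange = version.onOrAfter(TransportVersions.V_7_7_0) && version.before(TransportVersions.V_8_0_0);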
SearchHit(StreamInput in) throws IOException { docId = -1; score = in.readFloat(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { rank = in.readVInt(); } id = in.readOptionalText(); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { in.readOptionalText(); } nestedIdentity = in.readOptionalWriteable(NestedIdentity::new); @@ -156,7 +156,7 @@ public SearchHit(StreamInput in) throws IOException { if (in.readBoolean()) { explanation = readExplanation(in); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_8_0)) { documentFields.putAll(in.readMap(DocumentField::new)); metaFields.putAll(in.readMap(DocumentField::new)); } else { @@ -186,7 +186,7 @@ public SearchHit(StreamInput in) throws IOException { sortValues = new SearchSortValues(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { matchedQueries = in.readOrderedMap(StreamInput::readString, StreamInput::readFloat); } else { size = in.readVInt(); @@ -243,13 +243,13 @@ private static void writeFields(StreamOutput out, Map fie @Override public void writeTo(StreamOutput out) throws IOException { out.writeFloat(score); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeVInt(rank); } else if (rank != NO_RANK) { throw new IllegalArgumentException("cannot serialize [rank] to version [" + out.getTransportVersion() + "]"); } out.writeOptionalText(id); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { out.writeOptionalText(SINGLE_MAPPING_TYPE); } out.writeOptionalWriteable(nestedIdentity); @@ -263,9 +263,9 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(true); writeExplanation(out, explanation); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_8_0)) { - out.writeMap(documentFields, StreamOutput::writeString, (stream, documentField) -> documentField.writeTo(stream)); - out.writeMap(metaFields, StreamOutput::writeString, (stream, documentField) -> documentField.writeTo(stream)); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_8_0)) { + out.writeMap(documentFields, StreamOutput::writeWriteable); + out.writeMap(metaFields, StreamOutput::writeWriteable); } else { writeFields(out, this.getFields()); } @@ -276,16 +276,16 @@ public void writeTo(StreamOutput out) throws IOException { } sortValues.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { - out.writeMap(matchedQueries, StreamOutput::writeString, StreamOutput::writeFloat); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { + out.writeMap(matchedQueries, StreamOutput::writeFloat); } else { - out.writeStringArray(matchedQueries.keySet().toArray(new String[0])); + out.writeStringCollection(matchedQueries.keySet()); } out.writeOptionalWriteable(shard); if (innerHits == null) { out.writeVInt(0); } else { - out.writeMap(innerHits, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + out.writeMap(innerHits, StreamOutput::writeWriteable); } } diff --git a/server/src/main/java/org/elasticsearch/search/SearchModule.java 
b/server/src/main/java/org/elasticsearch/search/SearchModule.java index a50d7c5e1f99e..babce70239871 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchModule.java +++ b/server/src/main/java/org/elasticsearch/search/SearchModule.java @@ -10,6 +10,8 @@ import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.NamedRegistry; +import org.elasticsearch.common.geo.GeoBoundingBox; +import org.elasticsearch.common.io.stream.GenericNamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.NamedWriteableRegistry.Entry; import org.elasticsearch.common.io.stream.StreamOutput; @@ -315,6 +317,9 @@ public SearchModule(Settings settings, List plugins) { registerIntervalsSourceProviders(); requestCacheKeyDifferentiator = registerRequestCacheKeyDifferentiator(plugins); namedWriteables.addAll(SortValue.namedWriteables()); + registerGenericNamedWriteable( + new SearchPlugin.GenericNamedWriteableSpec(GeoBoundingBox.class.getSimpleName(), GeoBoundingBox::new) + ); } public List getNamedWriteables() { @@ -652,6 +657,9 @@ private ValuesSourceRegistry registerAggregations(List plugins) { } }); + // Register GenericNamedWriteable classes for use in StreamOutput/StreamInput as generic types in query hits + registerFromPlugin(plugins, SearchPlugin::getGenericNamedWriteables, this::registerGenericNamedWriteable); + return builder.build(); } @@ -678,6 +686,10 @@ private void registerAggregation(AggregationSpec spec, ValuesSourceRegistry.Buil } } + private void registerGenericNamedWriteable(SearchPlugin.GenericNamedWriteableSpec spec) { + namedWriteables.add(new NamedWriteableRegistry.Entry(GenericNamedWriteable.class, spec.name(), spec.reader())); + } + private void registerPipelineAggregations(List plugins) { registerPipelineAggregation( new PipelineAggregationSpec( diff --git a/server/src/main/java/org/elasticsearch/search/SearchService.java b/server/src/main/java/org/elasticsearch/search/SearchService.java index 89dc843886a2e..5ceeb53a8df6d 100644 --- a/server/src/main/java/org/elasticsearch/search/SearchService.java +++ b/server/src/main/java/org/elasticsearch/search/SearchService.java @@ -40,6 +40,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.Releasables; @@ -339,7 +340,7 @@ public SearchService( SearchService::validateKeepAlives ); - this.keepAliveReaper = threadPool.scheduleWithFixedDelay(new Reaper(), keepAliveInterval, Names.SAME); + this.keepAliveReaper = threadPool.scheduleWithFixedDelay(new Reaper(), keepAliveInterval, EsExecutors.DIRECT_EXECUTOR_SERVICE); defaultSearchTimeout = DEFAULT_SEARCH_TIMEOUT_SETTING.get(settings); clusterService.getClusterSettings().addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout); @@ -569,7 +570,7 @@ private void ensureAfterSeqNoRefreshed( new ElasticsearchTimeoutException("Wait for seq_no [{}] refreshed timed out [{}]", waitForCheckpoint, timeout) ); } - }, timeout, Names.SAME); + }, timeout, EsExecutors.DIRECT_EXECUTOR_SERVICE); // allow waiting for not-yet-issued sequence number if shard isn't promotable to primary and the timeout is less than or equal // to 30s @@ -588,7 +589,7 @@ public void onResponse(Void unused) { 
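// Editorial aside, not part of the diff: these SearchService call sites stop naming a
// thread pool ("same", i.e. Names.SAME) and pass an Executor directly.
// EsExecutors.DIRECT_EXECUTOR_SERVICE runs each task on the thread that submits it,
// which is what the "same" pool denoted, so scheduling behavior should be unchanged
// while the lookup through threadPool.executor(...) goes away.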
shard.addGlobalCheckpointListener(waitForCheckpoint, new GlobalCheckpointListeners.GlobalCheckpointListener() { @Override public Executor executor() { - return threadPool.executor(Names.SAME); + return EsExecutors.DIRECT_EXECUTOR_SERVICE; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java index 6dc05fa8fe843..786fabca78d63 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregationPhase.java @@ -72,7 +72,6 @@ private static void executeInSortOrder(SearchContext context, BucketCollector co searcher.setProfiler(context); try { searcher.search(context.rewrittenQuery(), collector); - collector.postCollection(); } catch (IOException e) { throw new AggregationExecutionException("Could not perform time series aggregation", e); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java index 2c265b066a8b5..e571458703e5a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/AggregatorFactories.java @@ -303,8 +303,8 @@ public Builder(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeCollection(this.aggregationBuilders, StreamOutput::writeNamedWriteable); - out.writeCollection(this.pipelineAggregatorBuilders, StreamOutput::writeNamedWriteable); + out.writeNamedWriteableCollection(this.aggregationBuilders); + out.writeNamedWriteableCollection(this.pipelineAggregatorBuilders); } public boolean mustVisitAllDocs() { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java index fac37ddb0644b..c1c54f80987f0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/InternalAggregations.java @@ -59,12 +59,12 @@ public static InternalAggregations from(List aggregations) } public static InternalAggregations readFrom(StreamInput in) throws IOException { - return from(in.readList(stream -> stream.readNamedWriteable(InternalAggregation.class))); + return from(in.readCollectionAsList(stream -> stream.readNamedWriteable(InternalAggregation.class))); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeNamedWriteableList(getInternalAggregations()); + out.writeNamedWriteableCollection(getInternalAggregations()); } /** diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java index 272d0db8cd002..6b894b3a9dbbd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket.composite; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.mapper.TimeSeriesIdFieldMapper.TimeSeriesIdFieldType; @@ -304,6 +305,6 @@ public boolean equals(Object obj) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java index 49c29bf722a9c..6bbebc0ec9e5e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/CompositeValuesSourceBuilder.java @@ -8,7 +8,7 @@ package org.elasticsearch.search.aggregations.bucket.composite; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -55,7 +55,7 @@ public abstract class CompositeValuesSourceBuilder builder, StreamOutput } else if (builder.getClass() == HistogramValuesSourceBuilder.class) { code = 2; } else if (builder.getClass() == GeoTileGridValuesSourceBuilder.class) { - if (out.getTransportVersion().before(TransportVersion.V_7_5_0)) { + if (out.getTransportVersion().before(TransportVersions.V_7_5_0)) { throw new IOException( "Attempting to serialize [" + builder.getClass().getSimpleName() diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java index 036a96dc8757d..2cd33e470e3e5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/DateHistogramValuesSourceBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.search.aggregations.bucket.composite; import org.apache.lucene.index.IndexReader; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -105,7 +105,7 @@ protected DateHistogramValuesSourceBuilder(StreamInput in) throws IOException { super(in); dateHistogramInterval = new DateIntervalWrapper(in); timeZone = in.readOptionalZoneId(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) { offset = in.readLong(); } } @@ -114,7 +114,7 @@ protected DateHistogramValuesSourceBuilder(StreamInput in) throws IOException { protected void innerWriteTo(StreamOutput out) throws IOException { dateHistogramInterval.writeTo(out); out.writeOptionalZoneId(timeZone); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) { out.writeLong(offset); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java index 558698e6bc132..927104a92deb2 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/composite/InternalComposite.java @@ -10,7 +10,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.PriorityQueue; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.DocValueFormat; @@ -94,21 +94,21 @@ private void validateAfterKey() { public InternalComposite(StreamInput in) throws IOException { super(in); this.size = in.readVInt(); - this.sourceNames = in.readStringList(); + this.sourceNames = in.readStringCollectionAsList(); this.formats = new ArrayList<>(sourceNames.size()); for (int i = 0; i < sourceNames.size(); i++) { formats.add(in.readNamedWriteable(DocValueFormat.class)); } this.reverseMuls = in.readIntArray(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { this.missingOrders = in.readArray(MissingOrder::readFromStream, MissingOrder[]::new); } else { this.missingOrders = new MissingOrder[reverseMuls.length]; Arrays.fill(missingOrders, MissingOrder.DEFAULT); } - this.buckets = in.readList((input) -> new InternalBucket(input, sourceNames, formats, reverseMuls, missingOrders)); + this.buckets = in.readCollectionAsList((input) -> new InternalBucket(input, sourceNames, formats, reverseMuls, missingOrders)); this.afterKey = in.readOptionalWriteable(CompositeKey::new); - this.earlyTerminated = in.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0) ? in.readBoolean() : false; + this.earlyTerminated = in.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0) ? 
in.readBoolean() : false; } @Override @@ -119,12 +119,12 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeNamedWriteable(format); } out.writeIntArray(reverseMuls); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { - out.writeArray((o, order) -> order.writeTo(o), missingOrders); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { + out.writeArray(missingOrders); } - out.writeList(buckets); + out.writeCollection(buckets); out.writeOptionalWriteable(afterKey); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) { out.writeBoolean(earlyTerminated); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java index 69f51e12d86ae..dc11658437670 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket.filter; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.QueryBuilder; @@ -151,7 +152,7 @@ public QueryBuilder getFilter() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } public static class FilterAggregatorFactory extends AggregatorFactory { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java index dfc0b46dc0837..a2ce68e3fc29e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FilterByFilterAggregator.java @@ -233,7 +233,7 @@ private FilterByFilterAggregator( @Override protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, LeafBucketCollector sub) throws IOException { assert scoreMode().needsScores() == false; - if (QueryToFilterAdapter.MatchesNoDocs(filters())) { + if (QueryToFilterAdapter.matchesNoDocs(filters())) { return LeafBucketCollector.NO_OP_COLLECTOR; } Bits live = aggCtx.getLeafReaderContext().reader().getLiveDocs(); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java index f2ddbf2582912..81678404d1dab 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket.filter; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -126,16 +127,16 @@ 
public FiltersAggregationBuilder(StreamInput in) throws IOException { } otherBucket = in.readBoolean(); otherBucketKey = in.readString(); - keyedBucket = in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0) ? in.readBoolean() : true; + keyedBucket = in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0) ? in.readBoolean() : true; } @Override protected void doWriteTo(StreamOutput out) throws IOException { out.writeBoolean(keyed); - out.writeCollection(filters, keyed ? (o, v) -> v.writeTo(o) : (o, v) -> o.writeNamedWriteable(v.filter())); + out.writeCollection(filters, keyed ? StreamOutput::writeWriteable : (o, v) -> o.writeNamedWriteable(v.filter())); out.writeBoolean(otherBucket); out.writeString(otherBucketKey); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeBoolean(keyedBucket); } } @@ -392,6 +393,6 @@ public String getType() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java index 4cc3f976c6da2..e0792fca6c28f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregator.java @@ -9,8 +9,13 @@ package org.elasticsearch.search.aggregations.bucket.filter; import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.DisiPriorityQueue; +import org.apache.lucene.search.DisiWrapper; +import org.apache.lucene.search.DisjunctionDISIApproximation; +import org.apache.lucene.search.DocIdSetIterator; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Scorable; +import org.apache.lucene.search.Scorer; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -38,7 +43,6 @@ import java.util.Map; import java.util.Objects; import java.util.function.BiConsumer; -import java.util.function.IntPredicate; import java.util.function.LongPredicate; /** @@ -289,33 +293,210 @@ static class Compatible extends FiltersAggregator { @Override protected LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, LeafBucketCollector sub) throws IOException { - if (QueryToFilterAdapter.MatchesNoDocs(filters()) && otherBucketKey == null) { + if (QueryToFilterAdapter.matchesNoDocs(filters()) && otherBucketKey == null) { return LeafBucketCollector.NO_OP_COLLECTOR; } - IntPredicate[] docFilters = new IntPredicate[filters().size()]; - for (int filterOrd = 0; filterOrd < filters().size(); filterOrd++) { - docFilters[filterOrd] = filters().get(filterOrd).matchingDocIds(aggCtx.getLeafReaderContext()); + final int numFilters = filters().size(); + List filterWrappers = new ArrayList<>(); + long totalCost = 0; + for (int filterOrd = 0; filterOrd < numFilters; filterOrd++) { + Scorer randomAccessScorer = filters().get(filterOrd).randomAccessScorer(aggCtx.getLeafReaderContext()); + if (randomAccessScorer == null) { + continue; + } + totalCost += randomAccessScorer.iterator().cost(); + filterWrappers.add( + randomAccessScorer.twoPhaseIterator() == null + ? 
new FilterMatchingDisiWrapper(randomAccessScorer, filterOrd) + : new TwoPhaseFilterMatchingDisiWrapper(randomAccessScorer, filterOrd) + ); } - return new LeafBucketCollectorBase(sub, null) { - @Override - public void collect(int doc, long bucket) throws IOException { - boolean matched = false; - for (int i = 0; i < docFilters.length; i++) { - if (docFilters[i].test(doc)) { - collectBucket(sub, doc, bucketOrd(bucket, i)); + + // Restrict the use of competitive iterator when there's no parent agg, no 'other' bucket (all values are accessed then) + // and the cost of per-filter doc iterator is smaller than maxDoc, indicating that there are docs matching the main + // query but not the filter query. + final boolean hasOtherBucket = otherBucketKey != null; + final boolean usesCompetitiveIterator = (parent == null + && hasOtherBucket == false + && filterWrappers.isEmpty() == false + && totalCost < aggCtx.getLeafReaderContext().reader().maxDoc()); + + if (filterWrappers.size() == 1) { + return new SingleFilterLeafCollector( + sub, + filterWrappers.get(0), + numFilters, + totalNumKeys, + usesCompetitiveIterator, + hasOtherBucket + ); + } + return new MultiFilterLeafCollector(sub, filterWrappers, numFilters, totalNumKeys, usesCompetitiveIterator, hasOtherBucket); + } + } + + private abstract static class AbstractLeafCollector extends LeafBucketCollectorBase { + final LeafBucketCollector sub; + final int numFilters; + final int totalNumKeys; + final boolean usesCompetitiveIterator; + final boolean hasOtherBucket; + + AbstractLeafCollector( + LeafBucketCollector sub, + int numFilters, + int totalNumKeys, + boolean usesCompetitiveIterator, + boolean hasOtherBucket + ) { + super(sub, null); + this.sub = sub; + this.numFilters = numFilters; + this.totalNumKeys = totalNumKeys; + this.usesCompetitiveIterator = usesCompetitiveIterator; + this.hasOtherBucket = hasOtherBucket; + } + + final long bucketOrd(long owningBucketOrdinal, int filterOrd) { + return owningBucketOrdinal * totalNumKeys + filterOrd; + } + } + + private class SingleFilterLeafCollector extends AbstractLeafCollector { + + final FilterMatchingDisiWrapper filterWrapper; + + SingleFilterLeafCollector( + LeafBucketCollector sub, + FilterMatchingDisiWrapper filterWrapper, + int numFilters, + int totalNumKeys, + boolean usesCompetitiveIterator, + boolean hasOtherBucket + ) { + super(sub, numFilters, totalNumKeys, usesCompetitiveIterator, hasOtherBucket); + this.filterWrapper = filterWrapper; + } + + public void collect(int doc, long bucket) throws IOException { + if (filterWrapper.approximation.docID() < doc) { + filterWrapper.approximation.advance(doc); + } + boolean matched = false; + if (filterWrapper.approximation.docID() == doc) { + if (filterWrapper.checkDocForMatch(doc)) { + collectBucket(sub, doc, bucketOrd(bucket, filterWrapper.filterOrd)); + matched = true; + } + } + if (hasOtherBucket && false == matched) { + collectBucket(sub, doc, bucketOrd(bucket, numFilters)); + } + } + + @Override + public DocIdSetIterator competitiveIterator() throws IOException { + if (usesCompetitiveIterator) { + return filterWrapper.approximation; + } + return null; + } + } + + private class MultiFilterLeafCollector extends AbstractLeafCollector { + + // A DocIdSetIterator heap with one entry for each filter, ordered by doc ID + final DisiPriorityQueue filterIterators; + + MultiFilterLeafCollector( + LeafBucketCollector sub, + List filterWrappers, + int numFilters, + int totalNumKeys, + boolean usesCompetitiveIterator, + boolean hasOtherBucket + ) { + 
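// Editorial aside, not part of the diff: a worked example of the competitive-iterator
// guard introduced above. With maxDoc = 1_000_000 and two filters whose iterators cost
// 10_000 and 50_000, totalCost = 60_000 < maxDoc: most documents match no filter, so
// letting the filter iterators drive collection can skip large stretches of doc IDs.
// When totalCost >= maxDoc the filters are dense and the main query may as well drive:
boolean usesCompetitiveIterator = parent == null      // a parent agg needs to see every doc
    && hasOtherBucket == false                        // the "other" bucket needs every doc too
    && filterWrappers.isEmpty() == false
    && totalCost < aggCtx.getLeafReaderContext().reader().maxDoc();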
super(sub, numFilters, totalNumKeys, usesCompetitiveIterator, hasOtherBucket); + filterIterators = filterWrappers.isEmpty() ? null : new DisiPriorityQueue(filterWrappers.size()); + for (FilterMatchingDisiWrapper wrapper : filterWrappers) { + filterIterators.add(wrapper); + } + } + + public void collect(int doc, long bucket) throws IOException { + boolean matched = false; + if (filterIterators != null) { + // Advance filters if necessary. Filters will already be advanced if used as a competitive iterator. + DisiWrapper top = filterIterators.top(); + while (top.doc < doc) { + top.doc = top.approximation.advance(doc); + top = filterIterators.updateTop(); + } + + if (top.doc == doc) { + for (DisiWrapper w = filterIterators.topList(); w != null; w = w.next) { + // It would be nice if DisiPriorityQueue supported generics to avoid unchecked casts. + FilterMatchingDisiWrapper topMatch = (FilterMatchingDisiWrapper) w; + if (topMatch.checkDocForMatch(doc)) { + collectBucket(sub, doc, bucketOrd(bucket, topMatch.filterOrd)); matched = true; } } - if (otherBucketKey != null && false == matched) { - collectBucket(sub, doc, bucketOrd(bucket, docFilters.length)); - } } - }; + } + + if (hasOtherBucket && false == matched) { + collectBucket(sub, doc, bucketOrd(bucket, numFilters)); + } } - final long bucketOrd(long owningBucketOrdinal, int filterOrd) { - return owningBucketOrdinal * totalNumKeys + filterOrd; + @Override + public DocIdSetIterator competitiveIterator() throws IOException { + if (usesCompetitiveIterator) { + // A DocIdSetIterator view of the filterIterators heap + assert filterIterators != null; + return new DisjunctionDISIApproximation(filterIterators); + } + return null; + } + } + + private static class FilterMatchingDisiWrapper extends DisiWrapper { + final int filterOrd; + + FilterMatchingDisiWrapper(Scorer scorer, int ord) { + super(scorer); + this.filterOrd = ord; + } + + boolean checkDocForMatch(int doc) throws IOException { + return true; + } + } + + private static class TwoPhaseFilterMatchingDisiWrapper extends FilterMatchingDisiWrapper { + // Tracks the last doc that matches the filter. + int lastMatchingDoc = -1; + // Tracks the last doc that was checked for filter matching. + int lastCheckedDoc = -1; + + TwoPhaseFilterMatchingDisiWrapper(Scorer scorer, int ord) { + super(scorer, ord); + } + + @Override + boolean checkDocForMatch(int doc) throws IOException { + // We need to cache the result of twoPhaseView.matches() since it's illegal to call it multiple times on the + // same doc, yet LeafBucketCollector#collect may be called multiple times with the same doc and multiple + // buckets. 
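// Editorial aside, not part of the diff: a worked example of the bucketOrd layout shared
// by these collectors, assuming totalNumKeys = numFilters + 1 when the "other" bucket
// exists. With numFilters = 3 (totalNumKeys = 4), owning bucket b owns ordinals
// [b * 4, b * 4 + 3]:
//   bucketOrd(0, 2) = 0 * 4 + 2 = 2    // filter #2 under owning bucket 0
//   bucketOrd(5, 3) = 5 * 4 + 3 = 23   // the "other" bucket (filterOrd == numFilters) under owning bucket 5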
+ if (lastCheckedDoc < doc) { + lastCheckedDoc = doc; + if (twoPhaseView.matches()) { + lastMatchingDoc = doc; + } + } + return (lastMatchingDoc == doc); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java index c86b0aea6f514..726589ca7c1b5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/InternalFilters.java @@ -8,7 +8,7 @@ package org.elasticsearch.search.aggregations.bucket.filter; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.Maps; @@ -147,7 +147,7 @@ public InternalFilters(String name, List buckets, boolean keyed, public InternalFilters(StreamInput in) throws IOException { super(in); keyed = in.readBoolean(); - keyedBucket = in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0) ? in.readBoolean() : true; + keyedBucket = in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0) ? in.readBoolean() : true; int size = in.readVInt(); List buckets = new ArrayList<>(size); for (int i = 0; i < size; i++) { @@ -160,10 +160,10 @@ public InternalFilters(StreamInput in) throws IOException { @Override protected void doWriteTo(StreamOutput out) throws IOException { out.writeBoolean(keyed); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeBoolean(keyedBucket); } - out.writeList(buckets); + out.writeCollection(buckets); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/QueryToFilterAdapter.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/QueryToFilterAdapter.java index 36afc481c1723..f7a613fbe142b 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/QueryToFilterAdapter.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/QueryToFilterAdapter.java @@ -21,17 +21,17 @@ import org.apache.lucene.search.PointRangeQuery; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; +import org.apache.lucene.search.Scorer; +import org.apache.lucene.search.ScorerSupplier; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.search.aggregations.Aggregator; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.util.List; import java.util.function.BiConsumer; -import java.util.function.IntPredicate; /** * Adapts a Lucene {@link Query} to the behaviors used be the @@ -171,16 +171,20 @@ private static Query unwrap(Query query) { } /** - * Build a predicate that the "compatible" implementation of the - * {@link FiltersAggregator} will use to figure out if the filter matches. - *
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/QueryToFilterAdapter.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/QueryToFilterAdapter.java
index 36afc481c1723..f7a613fbe142b 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/QueryToFilterAdapter.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/filter/QueryToFilterAdapter.java
@@ -21,17 +21,17 @@
 import org.apache.lucene.search.PointRangeQuery;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreMode;
+import org.apache.lucene.search.Scorer;
+import org.apache.lucene.search.ScorerSupplier;
 import org.apache.lucene.search.Weight;
 import org.apache.lucene.util.Bits;
 import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.xcontent.XContentBuilder;
 
 import java.io.IOException;
 import java.util.List;
 import java.util.function.BiConsumer;
-import java.util.function.IntPredicate;
 
 /**
  * Adapts a Lucene {@link Query} to the behaviors used by the
@@ -171,16 +171,20 @@ private static Query unwrap(Query query) {
     }
 
     /**
-     * Build a predicate that the "compatible" implementation of the
-     * {@link FiltersAggregator} will use to figure out if the filter matches.
-     * <p>
-     * Consumers of this method will always call it with non-negative,
-     * increasing {@code int}s. A sequence like {@code 0, 1, 7, 8, 10} is fine.
-     * It won't call with {@code 0, 1, 0} or {@code -1, 0, 1}.
+     * Returns the {@link Scorer} that the "compatible" implementation of the {@link FiltersAggregator} will use
+     * to get an iterator over the docs matching the filter. The scorer is optimized for random access, since
+     * it will be skipping documents that don't match the main query or other filters.
+     * If the passed context contains no scorer, {@code null} is returned.
      */
-    @SuppressWarnings("resource") // Closing the reader is someone else's problem
-    IntPredicate matchingDocIds(LeafReaderContext ctx) throws IOException {
-        return Lucene.asSequentialAccessBits(ctx.reader().maxDoc(), weight().scorerSupplier(ctx))::get;
+    Scorer randomAccessScorer(LeafReaderContext ctx) throws IOException {
+        Weight weight = weight();
+        ScorerSupplier scorerSupplier = weight.scorerSupplier(ctx);
+        if (scorerSupplier == null) {
+            return null;
+        }
+
+        // A leading cost of 0 instructs the scorer to optimize for random access as opposed to sequential access
+        return scorerSupplier.get(0L);
    }
 
    /**
@@ -255,7 +259,7 @@ private Weight weight() throws IOException {
      * @param filters list of filters to check
      * @return true if all filters match no docs, otherwise false
      */
-    static boolean MatchesNoDocs(List<QueryToFilterAdapter> filters) {
+    static boolean matchesNoDocs(List<QueryToFilterAdapter> filters) {
         for (QueryToFilterAdapter filter : filters) {
             if (filter.query() instanceof MatchNoDocsQuery == false) {
                 return false;
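randomAccessScorer replaces a sequential Bits view with a Scorer intended for skipping: the lead cost passed to ScorerSupplier.get is the number of documents the caller expects to drive through the scorer, and 0 signals that iteration will be led by something far sparser, so the supplier can favor a structure that advances cheaply. Here is a hypothetical usage sketch, not from this diff; as with the heap sketch earlier, docs must be probed in increasing order and each doc at most once.

import java.io.IOException;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.ScorerSupplier;
import org.apache.lucene.search.TwoPhaseIterator;
import org.apache.lucene.search.Weight;

final class RandomAccessMatchSketch {
    /** Builds a scorer tuned for random access, or null if nothing matches in this segment. */
    static Scorer randomAccess(Weight weight, LeafReaderContext ctx) throws IOException {
        ScorerSupplier supplier = weight.scorerSupplier(ctx);
        if (supplier == null) {
            return null;
        }
        // Lead cost 0: we expect to consume only a small subset of the matches.
        return supplier.get(0L);
    }

    /** Checks whether {@code doc} matches, confirming two-phase approximations. */
    static boolean matches(Scorer scorer, int doc) throws IOException {
        TwoPhaseIterator twoPhase = scorer.twoPhaseIterator();
        DocIdSetIterator approximation = twoPhase == null ? scorer.iterator() : twoPhase.approximation();
        int current = approximation.docID();
        if (current < doc) {
            current = approximation.advance(doc);
        }
        // matches() is legal only once per positioned doc, hence the caching
        // wrapper in the PR when the same doc is collected into several buckets.
        return current == doc && (twoPhase == null || twoPhase.matches());
    }
}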
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java
index 5adb8bbebd471..882b4960dd36c 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoGridAggregationBuilder.java
@@ -9,7 +9,7 @@
 package org.elasticsearch.search.aggregations.bucket.geogrid;
 
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.geo.GeoBoundingBox;
 import org.elasticsearch.common.geo.GeoPoint;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -91,7 +91,7 @@ public GeoGridAggregationBuilder(StreamInput in) throws IOException {
         precision = in.readVInt();
         requiredSize = in.readVInt();
         shardSize = in.readVInt();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) {
             geoBoundingBox = new GeoBoundingBox(in);
         }
     }
@@ -111,7 +111,7 @@ protected void innerWriteTo(StreamOutput out) throws IOException {
         out.writeVInt(precision);
         out.writeVInt(requiredSize);
         out.writeVInt(shardSize);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) {
             geoBoundingBox.writeTo(out);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java
index ac51799acf70d..9fd1e3a393046 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java
+++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoHashGridAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket.geogrid; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.geo.GeoBoundingBox; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.io.stream.StreamInput; @@ -116,6 +117,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java index 7d6c6bbd15e38..2ee29c8a049ef 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/GeoTileGridAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket.geogrid; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.geo.GeoBoundingBox; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -112,6 +113,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_0_0; + return TransportVersions.V_7_0_0; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java index 9348be3903cc3..f09dca2045d56 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/geogrid/InternalGeoGrid.java @@ -55,13 +55,13 @@ protected InternalGeoGrid(String name, int requiredSize, List) in.readList(getBucketReader()); + buckets = (List) in.readCollectionAsList(getBucketReader()); } @Override protected void doWriteTo(StreamOutput out) throws IOException { writeSize(requiredSize, out); - out.writeList(buckets); + out.writeCollection(buckets); } protected abstract InternalGeoGrid create( diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregationBuilder.java index 3d34824644df7..d0aedf7e3aad6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/global/GlobalAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket.global; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; @@ -80,6 +81,6 @@ public String getType() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } 
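Most of the remaining hunks in this diff are the same one-line migration: the named version constants move from TransportVersion to the new TransportVersions holder, while TransportVersion itself remains the type used in signatures and comparisons. A minimal sketch of the resulting shape (hypothetical builder, illustrative only):

import org.elasticsearch.TransportVersion;   // still the type that names a wire version
import org.elasticsearch.TransportVersions;  // now the home of the named constants

abstract class ExampleAggregationBuilder {
    // Signatures keep returning TransportVersion...
    public TransportVersion getMinimalSupportedVersion() {
        // ...while the named versions are looked up on TransportVersions.
        return TransportVersions.ZERO;
    }
}

This is why most hunks below touch only an import line and the constant references, leaving the surrounding logic untouched.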
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java index 2c1537e17f672..ebbb290e1db9c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -156,7 +157,7 @@ public DateHistogramAggregationBuilder(StreamInput in) throws IOException { dateHistogramInterval = new DateIntervalWrapper(in); offset = in.readLong(); extendedBounds = in.readOptionalWriteable(LongBounds::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { hardBounds = in.readOptionalWriteable(LongBounds::new); } } @@ -179,7 +180,7 @@ protected void innerWriteTo(StreamOutput out) throws IOException { dateHistogramInterval.writeTo(out); out.writeLong(offset); out.writeOptionalWriteable(extendedBounds); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { out.writeOptionalWriteable(hardBounds); } } @@ -519,7 +520,7 @@ public boolean equals(Object obj) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java index edf5b72fc0d1c..0740557a526d3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/HistogramAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -145,7 +146,7 @@ public HistogramAggregationBuilder(StreamInput in) throws IOException { minDocCount = in.readVLong(); interval = in.readDouble(); offset = in.readDouble(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { extendedBounds = in.readOptionalWriteable(DoubleBounds::new); hardBounds = in.readOptionalWriteable(DoubleBounds::new); } else { @@ -166,7 +167,7 @@ protected void innerWriteTo(StreamOutput out) throws IOException { out.writeVLong(minDocCount); out.writeDouble(interval); out.writeDouble(offset); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { out.writeOptionalWriteable(extendedBounds); out.writeOptionalWriteable(hardBounds); } 
else { @@ -442,7 +443,7 @@ public boolean equals(Object obj) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java index a94ab7aab4481..82716dca7311c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalDateHistogram.java @@ -243,7 +243,7 @@ public InternalDateHistogram(StreamInput in) throws IOException { offset = in.readLong(); format = in.readNamedWriteable(DocValueFormat.class); keyed = in.readBoolean(); - buckets = in.readList(stream -> new Bucket(stream, keyed, format)); + buckets = in.readCollectionAsList(stream -> new Bucket(stream, keyed, format)); } @Override @@ -256,7 +256,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeLong(offset); out.writeNamedWriteable(format); out.writeBoolean(keyed); - out.writeList(buckets); + out.writeCollection(buckets); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java index 77891e9a73beb..caef13221b0f3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalHistogram.java @@ -238,7 +238,7 @@ public InternalHistogram(StreamInput in) throws IOException { } format = in.readNamedWriteable(DocValueFormat.class); keyed = in.readBoolean(); - buckets = in.readList(stream -> new Bucket(stream, keyed, format)); + buckets = in.readCollectionAsList(stream -> new Bucket(stream, keyed, format)); } @Override @@ -250,7 +250,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { } out.writeNamedWriteable(format); out.writeBoolean(keyed); - out.writeList(buckets); + out.writeCollection(buckets); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java index 1acf0b0ce385f..9f464fa1b23cb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/InternalVariableWidthHistogram.java @@ -260,7 +260,7 @@ public InternalVariableWidthHistogram(StreamInput in) throws IOException { super(in); emptyBucketInfo = new EmptyBucketInfo(in); format = in.readNamedWriteable(DocValueFormat.class); - buckets = in.readList(stream -> new Bucket(stream, format)); + buckets = in.readCollectionAsList(stream -> new Bucket(stream, format)); targetNumBuckets = in.readVInt(); } @@ -268,7 +268,7 @@ public InternalVariableWidthHistogram(StreamInput in) throws IOException { protected void doWriteTo(StreamOutput out) throws IOException { emptyBucketInfo.writeTo(out); out.writeNamedWriteable(format); - out.writeList(buckets); + out.writeCollection(buckets); out.writeVInt(targetNumBuckets); } diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java index a0a42ba101981..9ca606f460fbb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/histogram/VariableWidthHistogramAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket.histogram; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -247,6 +248,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_9_0; + return TransportVersions.V_7_9_0; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java index c198ed4d038f6..915d7c32b4c74 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/missing/MissingAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket.missing; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -120,6 +121,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java index 0839b2b83ceeb..b6e6a39356d46 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/NestedAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket.nested; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -161,6 +162,6 @@ public String getType() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java index ce8616b8fc530..e6c4e59bf3f93 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/nested/ReverseNestedAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket.nested; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -177,6 +178,6 @@ public String getType() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java index 01e16341e2a5d..f0104599396dd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/InternalIpPrefix.java @@ -203,7 +203,7 @@ public InternalIpPrefix(StreamInput in) throws IOException { format = in.readNamedWriteable(DocValueFormat.class); keyed = in.readBoolean(); minDocCount = in.readVLong(); - buckets = in.readList(stream -> new Bucket(stream, format, keyed)); + buckets = in.readCollectionAsList(stream -> new Bucket(stream, format, keyed)); } @Override @@ -216,7 +216,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeNamedWriteable(format); out.writeBoolean(keyed); out.writeVLong(minDocCount); - out.writeList(buckets); + out.writeCollection(buckets); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregationBuilder.java index 304d3426c4f30..733a306a70c75 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/prefix/IpPrefixAggregationBuilder.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -315,6 +316,6 @@ public int hashCode() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_1_0; + return TransportVersions.V_8_1_0; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java index 87f726b47579e..6c18976b9463d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/AbstractRangeBuilder.java @@ -55,7 +55,7 @@ protected AbstractRangeBuilder(StreamInput in, InternalRange.Factory range throws IOException { super(in); this.rangeFactory = rangeFactory; - ranges = in.readList(rangeReader); + ranges = in.readCollectionAsList(rangeReader); keyed = in.readBoolean(); } @@ -109,7 +109,7 @@ protected int compare(int i, int j) { @Override protected void innerWriteTo(StreamOutput out) throws IOException { - out.writeList(ranges); + 
out.writeCollection(ranges);
         out.writeBoolean(keyed);
     }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java
index fa53153d50d6e..1dfb7a8dac2f5 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/DateRangeAggregationBuilder.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.search.aggregations.bucket.range;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.logging.DeprecationCategory;
 import org.elasticsearch.common.logging.DeprecationLogger;
@@ -130,7 +131,7 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.ZERO;
+        return TransportVersions.ZERO;
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java
index 61ee79c633aa8..a44d92f024e46 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/GeoDistanceAggregationBuilder.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.search.aggregations.bucket.range;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.geo.GeoDistance;
 import org.elasticsearch.common.geo.GeoPoint;
@@ -311,7 +312,7 @@ public GeoPoint origin() {
     protected void innerWriteTo(StreamOutput out) throws IOException {
         out.writeDouble(origin.lat());
         out.writeDouble(origin.lon());
-        out.writeList(ranges);
+        out.writeCollection(ranges);
         out.writeBoolean(keyed);
         distanceType.writeTo(out);
         unit.writeTo(out);
@@ -503,7 +504,7 @@ public boolean equals(Object obj) {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.ZERO;
+        return TransportVersions.ZERO;
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java
index 88777f8a29286..23105bbe2d4f3 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalBinaryRange.java
@@ -9,7 +9,7 @@
 package org.elasticsearch.search.aggregations.bucket.range;
 
 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.search.DocValueFormat;
@@ -72,8 +72,8 @@ private static String generateKey(BytesRef from, BytesRef to, DocValueFormat for
     private static Bucket createFromStream(StreamInput in, DocValueFormat format, boolean keyed) throws IOException {
         // NOTE: the key is required in version == 8.0.0 and version <= 7.17.0,
         // while it is optional for all subsequent versions.
-        String key = in.getTransportVersion().equals(TransportVersion.V_8_0_0) ? in.readString()
-            : in.getTransportVersion().onOrAfter(TransportVersion.V_7_17_1) ? in.readOptionalString()
+        String key = in.getTransportVersion().equals(TransportVersions.V_8_0_0) ? in.readString()
+            : in.getTransportVersion().onOrAfter(TransportVersions.V_7_17_1) ? in.readOptionalString()
             : in.readString();
         BytesRef from = in.readBoolean() ? in.readBytesRef() : null;
         BytesRef to = in.readBoolean() ? in.readBytesRef() : null;
@@ -85,9 +85,9 @@ private static Bucket createFromStream(StreamInput in, DocValueFormat format, bo
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getTransportVersion().equals(TransportVersion.V_8_0_0)) {
+        if (out.getTransportVersion().equals(TransportVersions.V_8_0_0)) {
             out.writeString(key == null ? generateKey(from, to, format) : key);
-        } else if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_17_1)) {
+        } else if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_17_1)) {
             out.writeOptionalString(key);
         } else {
             out.writeString(key == null ? generateKey(from, to, format) : key);
@@ -216,14 +216,14 @@ public InternalBinaryRange(StreamInput in) throws IOException {
         super(in);
         format = in.readNamedWriteable(DocValueFormat.class);
         keyed = in.readBoolean();
-        buckets = in.readList(stream -> Bucket.createFromStream(stream, format, keyed));
+        buckets = in.readCollectionAsList(stream -> Bucket.createFromStream(stream, format, keyed));
     }
 
     @Override
     protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeNamedWriteable(format);
         out.writeBoolean(keyed);
-        out.writeList(buckets);
+        out.writeCollection(buckets);
     }
 
     @Override
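The NOTE in these hunks documents an awkward wire-format wrinkle: the optional-key encoding presumably landed in 7.17.1 but missed the 8.0.0 release, so a peer on exactly 8.0.0 must be special-cased before the onOrAfter(V_7_17_1) check, which 8.0.0 would otherwise satisfy. A condensed sketch of the read/write pair, as a hypothetical helper mirroring the code above:

import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

final class BucketKeyWireSketch {
    static String readKey(StreamInput in) throws IOException {
        // Order matters: test equals(V_8_0_0) before the onOrAfter(V_7_17_1) range check.
        return in.getTransportVersion().equals(TransportVersions.V_8_0_0) ? in.readString()
            : in.getTransportVersion().onOrAfter(TransportVersions.V_7_17_1) ? in.readOptionalString()
            : in.readString();
    }

    static void writeKey(StreamOutput out, String key, String generatedFallback) throws IOException {
        if (out.getTransportVersion().equals(TransportVersions.V_8_0_0)) {
            // 8.0.0 peers require a key, so synthesize one when it is absent.
            out.writeString(key == null ? generatedFallback : key);
        } else if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_17_1)) {
            out.writeOptionalString(key);
        } else {
            out.writeString(key == null ? generatedFallback : key);
        }
    }
}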
diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java
index f821aa3440303..f125e19c75b48 100644
--- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java
+++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/InternalRange.java
@@ -7,7 +7,7 @@
  */
 package org.elasticsearch.search.aggregations.bucket.range;
 
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.search.DocValueFormat;
@@ -160,19 +160,19 @@ private static String generateKey(double from, double to, DocValueFormat format)
     public void writeTo(StreamOutput out) throws IOException {
         // NOTE: the key is required in version == 8.0.0 and version <= 7.17.0,
         // while it is optional for all subsequent versions.
-        if (out.getTransportVersion().equals(TransportVersion.V_8_0_0)) {
+        if (out.getTransportVersion().equals(TransportVersions.V_8_0_0)) {
             out.writeString(key == null ? generateKey(from, to, format) : key);
-        } else if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_17_1)) {
+        } else if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_17_1)) {
             out.writeOptionalString(key);
         } else {
             out.writeString(key == null ? 
generateKey(from, to, format) : key); } out.writeDouble(from); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_17_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_17_0)) { out.writeOptionalDouble(from); } out.writeDouble(to); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_17_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_17_0)) { out.writeOptionalDouble(to); } out.writeVLong(docCount); @@ -270,11 +270,11 @@ public InternalRange(StreamInput in) throws IOException { for (int i = 0; i < size; i++) { // NOTE: the key is required in version == 8.0.0 and version <= 7.17.0, // while it is optional for all subsequent versions. - final String key = in.getTransportVersion().equals(TransportVersion.V_8_0_0) ? in.readString() - : in.getTransportVersion().onOrAfter(TransportVersion.V_7_17_1) ? in.readOptionalString() + final String key = in.getTransportVersion().equals(TransportVersions.V_8_0_0) ? in.readString() + : in.getTransportVersion().onOrAfter(TransportVersions.V_7_17_1) ? in.readOptionalString() : in.readString(); double from = in.readDouble(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_17_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_17_0)) { final Double originalFrom = in.readOptionalDouble(); if (originalFrom != null) { from = originalFrom; @@ -283,7 +283,7 @@ public InternalRange(StreamInput in) throws IOException { } } double to = in.readDouble(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_17_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_17_0)) { final Double originalTo = in.readOptionalDouble(); if (originalTo != null) { to = originalTo; diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java index 64b3c6a0839bc..ef580929521d0 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/IpRangeAggregationBuilder.java @@ -10,6 +10,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -427,6 +428,6 @@ public boolean equals(Object obj) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java index c94c0176983b7..9fdbaa10509e6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket.range; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.DocValueFormat; import 
org.elasticsearch.search.aggregations.AggregationBuilder; @@ -211,6 +212,6 @@ private static String generateKey(double from, double to, DocValueFormat format) @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java index 8c86201bafefe..98237d19f0f33 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/range/RangeAggregator.java @@ -9,7 +9,7 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.ScorerSupplier; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -174,8 +174,8 @@ public Range(StreamInput in) throws IOException { toAsStr = in.readOptionalString(); from = in.readDouble(); to = in.readDouble(); - originalFrom = in.getTransportVersion().onOrAfter(TransportVersion.V_7_17_0) ? in.readOptionalDouble() : Double.valueOf(from); - originalTo = in.getTransportVersion().onOrAfter(TransportVersion.V_7_17_0) ? in.readOptionalDouble() : Double.valueOf(to); + originalFrom = in.getTransportVersion().onOrAfter(TransportVersions.V_7_17_0) ? in.readOptionalDouble() : Double.valueOf(from); + originalTo = in.getTransportVersion().onOrAfter(TransportVersions.V_7_17_0) ? in.readOptionalDouble() : Double.valueOf(to); } @Override @@ -185,7 +185,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(toAsStr); out.writeDouble(from); out.writeDouble(to); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_17_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_17_0)) { out.writeOptionalDouble(originalFrom); out.writeOptionalDouble(originalTo); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java index b05a68f6f4f90..6dd998c0db043 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/DiversifiedAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket.sampler; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -212,6 +213,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java index 116a0d6dbb32e..9795097f308da 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/SamplerAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket.sampler; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -145,6 +146,6 @@ public String getType() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java index becbc8d809e0a..80d396d9aff7d 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/sampler/random/RandomSamplerAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket.sampler.random; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -169,7 +170,7 @@ public String getType() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_2_0; + return TransportVersions.V_8_2_0; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java index 26468dc8fc54a..eb50de5f7ea59 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/IncludeExclude.java @@ -21,7 +21,7 @@ import org.apache.lucene.util.automaton.Operations; import org.apache.lucene.util.hppc.BitMixer; import org.elasticsearch.ElasticsearchParseException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -387,7 +387,7 @@ public IncludeExclude(StreamInput in) throws IOException { include = includeString == null ? null : new RegExp(includeString); String excludeString = in.readOptionalString(); exclude = excludeString == null ? null : new RegExp(excludeString); - if (in.getTransportVersion().before(TransportVersion.V_7_11_0)) { + if (in.getTransportVersion().before(TransportVersions.V_7_11_0)) { incZeroBasedPartition = 0; incNumPartitions = 0; includeValues = null; @@ -427,7 +427,7 @@ public void writeTo(StreamOutput out) throws IOException { if (regexBased) { out.writeOptionalString(include == null ? null : include.getOriginalString()); out.writeOptionalString(exclude == null ? 
null : exclude.getOriginalString()); - if (out.getTransportVersion().before(TransportVersion.V_7_11_0)) { + if (out.getTransportVersion().before(TransportVersions.V_7_11_0)) { return; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java index 87c6ff5a462de..61f15978bc154 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedRareTerms.java @@ -72,14 +72,14 @@ SetBackedScalingCuckooFilter getFilter() { InternalMappedRareTerms(StreamInput in, Bucket.Reader bucketReader) throws IOException { super(in); format = in.readNamedWriteable(DocValueFormat.class); - buckets = in.readList(stream -> bucketReader.read(stream, format)); + buckets = in.readCollectionAsList(stream -> bucketReader.read(stream, format)); filter = new SetBackedScalingCuckooFilter(in, Randomness.get()); } @Override protected void writeTermTypeInfoTo(StreamOutput out) throws IOException { out.writeNamedWriteable(format); - out.writeList(buckets); + out.writeCollection(buckets); filter.writeTo(out); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedSignificantTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedSignificantTerms.java index ea32df7683e49..c0d7103e42e8e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedSignificantTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedSignificantTerms.java @@ -58,7 +58,7 @@ protected InternalMappedSignificantTerms(StreamInput in, Bucket.Reader bucket subsetSize = in.readVLong(); supersetSize = in.readVLong(); significanceHeuristic = in.readNamedWriteable(SignificanceHeuristic.class); - buckets = in.readList(stream -> bucketReader.read(stream, subsetSize, supersetSize, format)); + buckets = in.readCollectionAsList(stream -> bucketReader.read(stream, subsetSize, supersetSize, format)); } @Override @@ -67,7 +67,7 @@ protected final void writeTermTypeInfoTo(StreamOutput out) throws IOException { out.writeVLong(subsetSize); out.writeVLong(supersetSize); out.writeNamedWriteable(significanceHeuristic); - out.writeList(buckets); + out.writeCollection(buckets); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java index fb3b262e5f0b2..fe41df12feafa 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalMappedTerms.java @@ -8,7 +8,7 @@ package org.elasticsearch.search.aggregations.bucket.terms; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.DocValueFormat; @@ -63,7 +63,7 @@ protected InternalMappedTerms( */ protected InternalMappedTerms(StreamInput in, Bucket.Reader bucketReader) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_15_0)) { + if 
(in.getTransportVersion().onOrAfter(TransportVersions.V_7_15_0)) { if (in.readBoolean()) { docCountError = in.readZLong(); } else { @@ -76,12 +76,12 @@ protected InternalMappedTerms(StreamInput in, Bucket.Reader bucketReader) thr shardSize = readSize(in); showTermDocCountError = in.readBoolean(); otherDocCount = in.readVLong(); - buckets = in.readList(stream -> bucketReader.read(stream, format, showTermDocCountError)); + buckets = in.readCollectionAsList(stream -> bucketReader.read(stream, format, showTermDocCountError)); } @Override protected final void writeTermTypeInfoTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_15_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_15_0)) { if (docCountError != null) { out.writeBoolean(true); out.writeZLong(docCountError); @@ -95,7 +95,7 @@ protected final void writeTermTypeInfoTo(StreamOutput out) throws IOException { writeSize(shardSize, out); out.writeBoolean(showTermDocCountError); out.writeVLong(otherDocCount); - out.writeList(buckets); + out.writeCollection(buckets); } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java index 318f8c16893b6..85307a903a3eb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/InternalTerms.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.search.aggregations.bucket.terms; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.DocValueFormat; @@ -197,7 +197,7 @@ protected InternalTerms( protected InternalTerms(StreamInput in) throws IOException { super(in); reduceOrder = InternalOrder.Streams.readOrder(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { order = InternalOrder.Streams.readOrder(in); } else { order = reduceOrder; @@ -208,7 +208,7 @@ protected InternalTerms(StreamInput in) throws IOException { @Override protected final void doWriteTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { reduceOrder.writeTo(out); } order.writeTo(out); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java index 66e91ef01e0ab..768c962d13db9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/RareTermsAggregationBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -250,6 +251,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public 
TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_3_0; + return TransportVersions.V_7_3_0; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java index d4589a533c67f..9fa9e65eb145a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTermsAggregationBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.QueryBuilder; @@ -380,6 +381,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_3_0; + return TransportVersions.V_7_3_0; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java index 77f2551fad6f9..a73d12c23a378 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/SignificantTextAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.AbstractQueryBuilder; @@ -400,6 +401,6 @@ public String getType() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_3_0; + return TransportVersions.V_7_3_0; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java index cf75abfee4d02..46b5e2d6d7980 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregationBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.search.aggregations.bucket.terms; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -442,6 +443,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java index 7f31fc9110efc..ee81039569f92 100644 --- 
a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalHDRPercentiles.java @@ -9,7 +9,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.HdrHistogram.DoubleHistogram; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.DocValueFormat; @@ -58,7 +58,7 @@ abstract class AbstractInternalHDRPercentiles extends InternalNumericMetricsAggr protected AbstractInternalHDRPercentiles(StreamInput in) throws IOException { super(in); keys = in.readDoubleArray(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { if (in.readBoolean()) { state = decode(in); } else { @@ -87,7 +87,7 @@ private DoubleHistogram decode(StreamInput in) throws IOException { protected void doWriteTo(StreamOutput out) throws IOException { out.writeNamedWriteable(format); out.writeDoubleArray(keys); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { if (this.state != null) { out.writeBoolean(true); encode(this.state, out); @@ -96,7 +96,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { } } else { DoubleHistogram state = this.state != null ? this.state - : out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0) ? EMPTY_HISTOGRAM_ZERO_DIGITS + : out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0) ? EMPTY_HISTOGRAM_ZERO_DIGITS : EMPTY_HISTOGRAM_THREE_DIGITS; encode(state, out); } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java index 0f5e8c1c6e75e..0faa01c8c4410 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractInternalTDigestPercentiles.java @@ -8,7 +8,7 @@ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.DocValueFormat; @@ -61,7 +61,7 @@ abstract class AbstractInternalTDigestPercentiles extends InternalNumericMetrics protected AbstractInternalTDigestPercentiles(StreamInput in) throws IOException { super(in); keys = in.readDoubleArray(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { if (in.readBoolean()) { state = TDigestState.read(in); } else { @@ -77,7 +77,7 @@ protected AbstractInternalTDigestPercentiles(StreamInput in) throws IOException protected void doWriteTo(StreamOutput out) throws IOException { out.writeNamedWriteable(format); out.writeDoubleArray(keys); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { if (this.state != null) { out.writeBoolean(true); TDigestState.write(state, out); diff --git 
a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java index 55976bb41825c..c66a93b480558 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AbstractPercentilesAggregationBuilder.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -146,7 +146,7 @@ public static > ConstructingO this.valuesField = valuesField; values = in.readDoubleArray(); keyed = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_8_0)) { percentilesConfig = (PercentilesConfig) in.readOptionalWriteable((Reader) PercentilesConfig::fromStream); } else { int numberOfSignificantValueDigits = in.readVInt(); @@ -165,7 +165,7 @@ public boolean supportsSampling() { protected void innerWriteTo(StreamOutput out) throws IOException { out.writeDoubleArray(values); out.writeBoolean(keyed); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_8_0)) { out.writeOptionalWriteable(percentilesConfig); } else { // Legacy method serialized both SigFigs and compression, even though we only need one. So we need diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregationBuilder.java index 13f2f79c1282b..e66d3d0a34580 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/AvgAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -105,6 +106,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java index 51037b85ab292..03e3dc93675d3 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/CardinalityAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ 
-85,7 +86,7 @@ public CardinalityAggregationBuilder(StreamInput in) throws IOException { if (in.readBoolean()) { precisionThreshold = in.readLong(); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { executionHint = in.readOptionalString(); } } @@ -102,7 +103,7 @@ protected void innerWriteTo(StreamOutput out) throws IOException { if (hasPrecisionThreshold) { out.writeLong(precisionThreshold); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { out.writeOptionalString(executionHint); } } @@ -224,6 +225,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregationBuilder.java index ee9895cade71e..1078f48a39317 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ExtendedStatsAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -150,6 +151,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java index 4c7b25da338fb..db7fdb5995eec 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoBoundsAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -157,6 +158,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregationBuilder.java index 1a3a6966521c4..ac0ffae7ac2e1 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GeoCentroidAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.TransportVersion; +import 
org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -112,6 +113,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GlobalOrdCardinalityAggregator.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GlobalOrdCardinalityAggregator.java index 93e44c0343d88..ee412666a21fa 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GlobalOrdCardinalityAggregator.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/GlobalOrdCardinalityAggregator.java @@ -66,8 +66,8 @@ public class GlobalOrdCardinalityAggregator extends NumericMetricsAggregator.Sin // Build at post-collection phase @Nullable private HyperLogLogPlusPlusSparse counts; - private SortedSetDocValues values; private ObjectArray<BitArray> visitedOrds; + private SortedSetDocValues values; public GlobalOrdCardinalityAggregator( String name, @@ -211,6 +211,7 @@ public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, if (maxOrd <= MAX_FIELD_CARDINALITY_FOR_DYNAMIC_PRUNING || numNonVisitedOrds <= MAX_TERMS_FOR_DYNAMIC_PRUNING) { dynamicPruningAttempts++; return new LeafBucketCollector() { + final SortedSetDocValues docValues = values; final BitArray bits; final CompetitiveIterator competitiveIterator; @@ -234,8 +235,8 @@ public LeafBucketCollector getLeafCollector(AggregationExecutionContext aggCtx, @Override public void collect(int doc, long bucketOrd) throws IOException { - if (values.advanceExact(doc)) { - for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) { + if (docValues.advanceExact(doc)) { + for (long ord = docValues.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = docValues.nextOrd()) { if (bits.getAndSet(ord) == false) { competitiveIterator.onVisitedOrdinal(ord); } @@ -267,6 +268,8 @@ public CompetitiveIterator competitiveIterator() { bruteForce++; return new LeafBucketCollector() { + final SortedSetDocValues docValues = values; + @Override public void collect(int doc, long bucketOrd) throws IOException { visitedOrds = bigArrays.grow(visitedOrds, bucketOrd + 1); @@ -275,8 +278,8 @@ public void collect(int doc, long bucketOrd) throws IOException { bits = new BitArray(maxOrd, bigArrays); visitedOrds.set(bucketOrd, bits); } - if (values.advanceExact(doc)) { - for (long ord = values.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = values.nextOrd()) { + if (docValues.advanceExact(doc)) { + for (long ord = docValues.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = docValues.nextOrd()) { bits.set((int) ord); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalGeoCentroid.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalGeoCentroid.java index 6adf77af3f92a..15f10b3a6d110 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalGeoCentroid.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalGeoCentroid.java @@ -9,7 +9,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.apache.lucene.geo.GeoEncodingUtils; -import org.elasticsearch.TransportVersion;
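
The GlobalOrdCardinalityAggregator hunks above add `final SortedSetDocValues docValues = values;` to each anonymous LeafBucketCollector, so the hot collect(doc) loop dereferences a stable final local instead of re-reading a field that is reassigned whenever collection moves to a new segment. A minimal sketch of that capture pattern, using stand-in types rather than the real Lucene/Elasticsearch classes:

```java
// Stand-in types only; not the real Lucene/Elasticsearch interfaces.
interface DocValuesStub {
    boolean advanceExact(int doc) throws java.io.IOException;
}

interface LeafCollectorStub {
    void collect(int doc) throws java.io.IOException;
}

class CaptureSketch {
    private DocValuesStub values; // reassigned every time a new segment is entered

    LeafCollectorStub leafCollector(DocValuesStub segmentValues) {
        values = segmentValues;
        // Snapshot the field once per segment: the collector below only ever
        // reads the stable local, never the mutable field.
        final DocValuesStub docValues = values;
        return doc -> {
            if (docValues.advanceExact(doc)) {
                // ... visit this doc's ordinals ...
            }
        };
    }
}
```

The diff itself does not state the motivation, but a final local both guards against observing a mid-collection reassignment and is an easier load for the JIT than a repeated field read.
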
+import org.elasticsearch.TransportVersions; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.geo.SpatialPoint; import org.elasticsearch.common.io.stream.StreamInput; @@ -64,7 +64,7 @@ public static InternalGeoCentroid empty(String name, Map metadat @Override protected GeoPoint centroidFromStream(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_2_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) { return new GeoPoint(in.readDouble(), in.readDouble()); } else { final long hash = in.readLong(); @@ -74,7 +74,7 @@ protected GeoPoint centroidFromStream(StreamInput in) throws IOException { @Override protected void centroidToStream(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_2_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) { out.writeDouble(centroid.getY()); out.writeDouble(centroid.getX()); } else { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetric.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetric.java index 1cdbc057834f1..1580c9ce22a29 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetric.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetric.java @@ -8,7 +8,7 @@ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.CollectionUtils; @@ -45,17 +45,17 @@ public class InternalScriptedMetric extends InternalAggregation implements Scrip public InternalScriptedMetric(StreamInput in) throws IOException { super(in); reduceScript = in.readOptionalWriteable(Script::new); - if (in.getTransportVersion().before(TransportVersion.V_7_8_0)) { + if (in.getTransportVersion().before(TransportVersions.V_7_8_0)) { aggregations = singletonList(in.readGenericValue()); } else { - aggregations = in.readList(StreamInput::readGenericValue); + aggregations = in.readCollectionAsList(StreamInput::readGenericValue); } } @Override protected void doWriteTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(reduceScript); - if (out.getTransportVersion().before(TransportVersion.V_7_8_0)) { + if (out.getTransportVersion().before(TransportVersions.V_7_8_0)) { if (aggregations.size() > 1) { /* * If aggregations has more than one entry we're trying to diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java index 57b97bbb6fe9f..b4bb1f01b0662 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MaxAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -109,6 +110,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override 
public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java index b30bf50e5a8ea..315e0bab027c5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MedianAbsoluteDeviationAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -63,10 +64,8 @@ public MedianAbsoluteDeviationAggregationBuilder(String name) { public MedianAbsoluteDeviationAggregationBuilder(StreamInput in) throws IOException { super(in); compression = in.readDouble(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_018)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { executionHint = in.readOptionalWriteable(TDigestExecutionHint::readFrom); - } else if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_014)) { - executionHint = TDigestExecutionHint.readFrom(in); } else { executionHint = TDigestExecutionHint.HIGH_ACCURACY; } @@ -128,10 +127,8 @@ protected ValuesSourceType defaultValueSourceType() { @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeDouble(compression); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_018)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeOptionalWriteable(executionHint); - } else if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_014)) { - (executionHint == null ? 
TDigestExecutionHint.DEFAULT : executionHint).writeTo(out); } } @@ -197,6 +194,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregationBuilder.java index fff112e04f7b2..3d4957feba7db 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/MinAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -111,6 +112,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregationBuilder.java index e7b869091976b..e61105b5822cf 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentileRanksAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -116,6 +117,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesAggregationBuilder.java index 96fd8052c0cc4..0347d157cf3c5 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.search.aggregations.AggregationBuilder; import org.elasticsearch.search.aggregations.AggregatorFactories; @@ -152,6 +153,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesConfig.java 
b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesConfig.java index a9c567b9dd22d..ae20e3efa6208 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/PercentilesConfig.java @@ -8,7 +8,7 @@ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -143,8 +143,8 @@ public TDigest(double compression, TDigestExecutionHint executionHint) { TDigest(StreamInput in) throws IOException { this( in.readDouble(), - in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_018) ? in.readOptionalWriteable(TDigestExecutionHint::readFrom) - : in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_014) ? TDigestExecutionHint.readFrom(in) + in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) + ? in.readOptionalWriteable(TDigestExecutionHint::readFrom) : TDigestExecutionHint.HIGH_ACCURACY ); } @@ -248,10 +248,8 @@ public InternalNumericMetricsAggregation.MultiValue createEmptyPercentileRanksAg public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeDouble(compression); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_018)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeOptionalWriteable(executionHint); - } else if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_014)) { - (executionHint == null ? TDigestExecutionHint.DEFAULT : executionHint).writeTo(out); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java index 8d92ccc16e09d..8386bb8bbdb06 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ScriptedMetricAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.script.Script; @@ -299,7 +300,7 @@ public String getType() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } @Override @@ -320,4 +321,8 @@ public boolean equals(Object obj) { && Objects.equals(params, other.params); } + @Override + public boolean supportsParallelCollection() { + return false; + } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java index f13fe41d9b04c..e978ffec42b4a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/StatsAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.TransportVersion; +import 
org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -116,7 +117,7 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } @Override diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregationBuilder.java index 201c9e87d63a1..da441363020bd 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/SumAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -109,6 +110,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java index 2fbe2e679c1ab..c631e30a0e64f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TDigestState.java @@ -7,7 +7,7 @@ */ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.tdigest.Centroid; @@ -111,7 +111,7 @@ public final double compression() { public static void write(TDigestState state, StreamOutput out) throws IOException { out.writeDouble(state.compression); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_014)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeString(state.type.toString()); out.writeVLong(state.tdigest.size()); } @@ -127,7 +127,7 @@ public static TDigestState read(StreamInput in) throws IOException { double compression = in.readDouble(); TDigestState state; long size = 0; - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_014)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { state = new TDigestState(Type.valueOf(in.readString()), compression); size = in.readVLong(); } else { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java index 3772eec79871b..4a5df855b5095 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/TopHitsAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.TransportVersion; 
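
The TDigestState hunks above are the clearest instance of the pattern this change applies everywhere: reader and writer branch on the same TransportVersions constant (here V_8_500_020, replacing the pre-release V_8_500_014 gate), so a newer field only goes over the wire when both sides can parse it. A minimal sketch of the two-sided gate, assuming plain java.io streams and an int version as stand-ins for StreamOutput/StreamInput and TransportVersion (the constant's value is illustrative only):

```java
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

final class VersionGatedIO {
    static final int V_NEW_FIELD = 8_500_020; // stand-in for TransportVersions.V_8_500_020

    // Writer: emit the newer field only when the remote side is new enough.
    static void write(DataOutput out, int peerVersion, double compression, String digestType) throws IOException {
        out.writeDouble(compression);
        if (peerVersion >= V_NEW_FIELD) { // "onOrAfter"
            out.writeUTF(digestType);
        }
    }

    // Reader: mirror the writer's condition exactly, and fall back to a
    // default when the peer never sent the field.
    static String read(DataInput in, int peerVersion) throws IOException {
        double compression = in.readDouble(); // always present, read in write order
        return peerVersion >= V_NEW_FIELD ? in.readUTF() : "LEGACY_DEFAULT";
    }
}
```

If the two conditions ever diverge, the stream desynchronizes and every later read returns garbage, which is why each hunk changes the read and write sides in the same commit.
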
+import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -129,9 +130,9 @@ public TopHitsAggregationBuilder(StreamInput in) throws IOException { trackScores = in.readBoolean(); version = in.readBoolean(); seqNoAndPrimaryTerm = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { if (in.readBoolean()) { - fetchFields = in.readList(FieldAndFormat::new); + fetchFields = in.readCollectionAsList(FieldAndFormat::new); } } } @@ -143,7 +144,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { boolean hasFieldDataFields = docValueFields != null; out.writeBoolean(hasFieldDataFields); if (hasFieldDataFields) { - out.writeList(docValueFields); + out.writeCollection(docValueFields); } out.writeOptionalWriteable(storedFieldsContext); out.writeVInt(from); @@ -157,15 +158,15 @@ protected void doWriteTo(StreamOutput out) throws IOException { boolean hasSorts = sorts != null; out.writeBoolean(hasSorts); if (hasSorts) { - out.writeNamedWriteableList(sorts); + out.writeNamedWriteableCollection(sorts); } out.writeBoolean(trackScores); out.writeBoolean(version); out.writeBoolean(seqNoAndPrimaryTerm); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { out.writeBoolean(fetchFields != null); if (fetchFields != null) { - out.writeList(fetchFields); + out.writeCollection(fetchFields); } } } @@ -918,6 +919,6 @@ public String getType() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java index 465fe3880076d..71f745559fc77 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/ValueCountAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -118,6 +119,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregationBuilder.java index e0ccf799d18cf..f983dc28bf26c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/metrics/WeightedAvgAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.metrics; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import 
org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.QueryBuilder; @@ -130,6 +131,6 @@ public String getType() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AbstractPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AbstractPipelineAggregationBuilder.java index 4716ab2d2ff8d..74bb9a8881d79 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AbstractPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AbstractPipelineAggregationBuilder.java @@ -92,11 +92,7 @@ public final XContentBuilder toXContent(XContentBuilder builder, Params params) builder.startObject(type); if (overrideBucketsPath() == false && bucketsPaths != null) { - builder.startArray(PipelineAggregator.Parser.BUCKETS_PATH.getPreferredName()); - for (String path : bucketsPaths) { - builder.value(path); - } - builder.endArray(); + builder.array(PipelineAggregator.Parser.BUCKETS_PATH.getPreferredName(), bucketsPaths); } internalXContent(builder, params); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketPipelineAggregationBuilder.java index ad34437e8ed38..4118f2b263bf9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/AvgBucketPipelineAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; @@ -63,6 +64,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregationBuilder.java index b3aa03cb9b401..4993c8ec25d83 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/BucketScriptPipelineAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.Maps; @@ -95,7 +96,7 @@ public BucketScriptPipelineAggregationBuilder(StreamInput in) throws IOException @Override protected void doWriteTo(StreamOutput out) throws IOException { - out.writeMap(bucketsPathsMap, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(bucketsPathsMap, StreamOutput::writeString); script.writeTo(out); out.writeOptionalString(format); gapPolicy.writeTo(out); @@ -224,6 +225,6 @@ public String 
getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregationBuilder.java index 639358b023f18..c2816629b653f 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/CumulativeSumPipelineAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.DocValueFormat; @@ -129,6 +130,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_4_0; + return TransportVersions.V_7_4_0; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketPipelineAggregationBuilder.java index 452d28ca8cc61..b781da3f81d58 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/ExtendedStatsBucketPipelineAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; @@ -100,6 +101,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalBucketMetricValue.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalBucketMetricValue.java index a40660fad53eb..7ddabd0f7a4c9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalBucketMetricValue.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/InternalBucketMetricValue.java @@ -96,11 +96,7 @@ public XContentBuilder doXContentBody(XContentBuilder builder, Params params) th if (hasValue && format != DocValueFormat.RAW) { builder.field(CommonFields.VALUE_AS_STRING.getPreferredName(), format.format(value).toString()); } - builder.startArray(KEYS_FIELD.getPreferredName()); - for (String key : keys) { - builder.value(key); - } - builder.endArray(); + builder.array(KEYS_FIELD.getPreferredName(), keys); return builder; } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketPipelineAggregationBuilder.java index 7d1a35d051fdb..d3383f5a7b31a 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketPipelineAggregationBuilder.java +++ 
b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MaxBucketPipelineAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; @@ -63,6 +64,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MinBucketPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MinBucketPipelineAggregationBuilder.java index 566be88cb0177..2259104fbf0a6 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MinBucketPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MinBucketPipelineAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; @@ -63,6 +64,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java index 4de5c6099523f..c174dd5458685 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/MovAvgPipelineAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -76,6 +77,6 @@ public final String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java index be0d0b6a6e2b8..a47f1255e0fe9 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/PercentilesBucketPipelineAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ParseField; @@ -119,7 +120,7 @@ protected XContentBuilder 
doXContentBody(XContentBuilder builder, Params params) @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } public static final PipelineAggregator.Parser PARSER = new BucketMetricsParser() { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregationBuilder.java index 243271fba3612..1143594e98d16 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SerialDiffPipelineAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -249,6 +250,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketPipelineAggregationBuilder.java index c46aceaf37333..9c72de0e3a43c 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/StatsBucketPipelineAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; @@ -63,6 +64,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SumBucketPipelineAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SumBucketPipelineAggregationBuilder.java index 3c499b9acf516..94d6bb9af1c95 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SumBucketPipelineAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/pipeline/SumBucketPipelineAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.pipeline; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; @@ -63,6 +64,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInfo.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInfo.java index 8e832e2caa258..e3662b150270c 
100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInfo.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/AggregationInfo.java @@ -52,7 +52,7 @@ public AggregationInfo(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(aggs, StreamOutput::writeString, StreamOutput::writeStringCollection); + out.writeMap(aggs, StreamOutput::writeStringCollection); } public Map<String, Set<String>> getAggregations() { diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java index 42d1fc745392f..b0a5541f54a6e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceAggregationBuilder.java @@ -104,7 +104,7 @@ private void read(StreamInput in) throws IOException { @Override protected final void doWriteTo(StreamOutput out) throws IOException { - out.writeMap(fields, StreamOutput::writeString, (o, value) -> value.writeTo(o)); + out.writeMap(fields, StreamOutput::writeWriteable); out.writeOptionalWriteable(userValueTypeHint); out.writeOptionalString(format); innerWriteTo(out); diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java index 22d1cbe6864df..9f698528dcefb 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/MultiValuesSourceFieldConfig.java @@ -8,7 +8,7 @@ package org.elasticsearch.search.aggregations.support; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -161,7 +161,7 @@ protected MultiValuesSourceFieldConfig( } public MultiValuesSourceFieldConfig(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) { this.fieldName = in.readOptionalString(); } else { this.fieldName = in.readString(); @@ -169,19 +169,19 @@ public MultiValuesSourceFieldConfig(StreamInput in) throws IOException { this.missing = in.readGenericValue(); this.script = in.readOptionalWriteable(Script::new); this.timeZone = in.readOptionalZoneId(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_8_0)) { this.filter = in.readOptionalNamedWriteable(QueryBuilder.class); } else { this.filter = null; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_12_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_12_0)) { this.userValueTypeHint = in.readOptionalWriteable(ValueType::readFromStream); this.format = in.readOptionalString(); } else { this.userValueTypeHint = null; this.format = null; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { this.includeExclude = in.readOptionalWriteable(IncludeExclude::new);
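
In the AggregationInfo and MultiValuesSourceAggregationBuilder hunks above, writeMap loses its explicit StreamOutput::writeString key writer, which implies an overload that fixes the key type to String and only asks callers for a value writer. A sketch of that API shape under hypothetical stand-in interfaces (the real overloads live on StreamOutput):

```java
import java.io.DataOutput;
import java.io.IOException;
import java.util.Map;

// Hypothetical stand-in for the writer functional interface.
interface EntryWriter<V> {
    void write(DataOutput out, V value) throws IOException;
}

final class MapIoSketch {
    // General form: the caller supplies both key and value writers.
    static <K, V> void writeMap(DataOutput out, Map<K, V> map, EntryWriter<K> keyWriter, EntryWriter<V> valueWriter)
        throws IOException {
        out.writeInt(map.size());
        for (Map.Entry<K, V> e : map.entrySet()) {
            keyWriter.write(out, e.getKey());
            valueWriter.write(out, e.getValue());
        }
    }

    // Convenience form: String keys are the common case, so the key writer is
    // baked in and every call site shrinks by one argument.
    static <V> void writeMap(DataOutput out, Map<String, V> map, EntryWriter<V> valueWriter) throws IOException {
        writeMap(out, map, DataOutput::writeUTF, valueWriter);
    }
}
```

The wire format is unchanged; only the call sites get shorter, which is consistent with this PR touching dozens of writeTo methods without any new version gates around them.
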
} else { this.includeExclude = null; @@ -222,7 +222,7 @@ public IncludeExclude getIncludeExclude() { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) { out.writeOptionalString(fieldName); } else { out.writeString(fieldName); @@ -230,14 +230,14 @@ public void writeTo(StreamOutput out) throws IOException { out.writeGenericValue(missing); out.writeOptionalWriteable(script); out.writeOptionalZoneId(timeZone); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_8_0)) { out.writeOptionalNamedWriteable(filter); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_12_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_12_0)) { out.writeOptionalWriteable(userValueTypeHint); out.writeOptionalString(format); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { out.writeOptionalWriteable(includeExclude); } } diff --git a/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesIndexSearcher.java b/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesIndexSearcher.java index 16efc62f2704f..375ccd127dc9e 100644 --- a/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesIndexSearcher.java +++ b/server/src/main/java/org/elasticsearch/search/aggregations/support/TimeSeriesIndexSearcher.java @@ -95,11 +95,13 @@ public void search(Query query, BucketCollector bucketCollector) throws IOExcept Weight weight = searcher.createWeight(query, bucketCollector.scoreMode(), 1); if (searcher.getExecutor() == null) { search(bucketCollector, weight); + bucketCollector.postCollection(); return; } // offload to the search worker thread pool whenever possible. 
It will be null only when search.worker_threads_enabled is false RunnableFuture<Void> task = new FutureTask<>(() -> { search(bucketCollector, weight); + bucketCollector.postCollection(); return null; }); searcher.getExecutor().execute(task); diff --git a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java index 8dab3cac700cb..043968e254d1d 100644 --- a/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/builder/SearchSourceBuilder.java @@ -9,7 +9,7 @@ package org.elasticsearch.search.builder; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -206,18 +206,18 @@ public SearchSourceBuilder(StreamInput in) throws IOException { explain = in.readOptionalBoolean(); fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::readFrom); if (in.readBoolean()) { - docValueFields = in.readList(FieldAndFormat::new); + docValueFields = in.readCollectionAsList(FieldAndFormat::new); } else { docValueFields = null; } storedFieldsContext = in.readOptionalWriteable(StoredFieldsContext::new); from = in.readVInt(); highlightBuilder = in.readOptionalWriteable(HighlightBuilder::new); - indexBoosts = in.readList(IndexBoost::new); + indexBoosts = in.readCollectionAsList(IndexBoost::new); minScore = in.readOptionalFloat(); postQueryBuilder = in.readOptionalNamedWriteable(QueryBuilder.class); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_013)) { - subSearchSourceBuilders = in.readList(SubSearchSourceBuilder::new); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + subSearchSourceBuilders = in.readCollectionAsList(SubSearchSourceBuilder::new); } else { QueryBuilder queryBuilder = in.readOptionalNamedWriteable(QueryBuilder.class); if (queryBuilder != null) { @@ -225,10 +225,10 @@ public SearchSourceBuilder(StreamInput in) throws IOException { } } if (in.readBoolean()) { - rescoreBuilders = in.readNamedWriteableList(RescorerBuilder.class); + rescoreBuilders = in.readNamedWriteableCollectionAsList(RescorerBuilder.class); } if (in.readBoolean()) { - scriptFields = in.readList(ScriptField::new); + scriptFields = in.readCollectionAsList(ScriptField::new); } size = in.readVInt(); if (in.readBoolean()) { @@ -239,7 +239,7 @@ public SearchSourceBuilder(StreamInput in) throws IOException { } } if (in.readBoolean()) { - stats = in.readStringList(); + stats = in.readStringCollectionAsList(); } suggestBuilder = in.readOptionalWriteable(SuggestBuilder::new); terminateAfter = in.readVInt(); @@ -247,30 +247,30 @@ public SearchSourceBuilder(StreamInput in) throws IOException { trackScores = in.readBoolean(); version = in.readOptionalBoolean(); seqNoAndPrimaryTerm = in.readOptionalBoolean(); - extBuilders = in.readNamedWriteableList(SearchExtBuilder.class); + extBuilders = in.readNamedWriteableCollectionAsList(SearchExtBuilder.class); profile = in.readBoolean(); searchAfterBuilder = in.readOptionalWriteable(SearchAfterBuilder::new); sliceBuilder = in.readOptionalWriteable(SliceBuilder::new); collapse = in.readOptionalWriteable(CollapseBuilder::new); trackTotalHitsUpTo = in.readOptionalInt(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if
(in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { if (in.readBoolean()) { - fetchFields = in.readList(FieldAndFormat::new); + fetchFields = in.readCollectionAsList(FieldAndFormat::new); } pointInTimeBuilder = in.readOptionalWriteable(PointInTimeBuilder::new); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) { runtimeMappings = in.readMap(); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { - if (in.getTransportVersion().before(TransportVersion.V_8_7_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_7_0)) { KnnSearchBuilder searchBuilder = in.readOptionalWriteable(KnnSearchBuilder::new); knnSearch = searchBuilder != null ? List.of(searchBuilder) : List.of(); } else { - knnSearch = in.readList(KnnSearchBuilder::new); + knnSearch = in.readCollectionAsList(KnnSearchBuilder::new); } } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { rankBuilder = in.readOptionalNamedWriteable(RankBuilder.class); } } @@ -282,17 +282,17 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(fetchSourceContext); out.writeBoolean(docValueFields != null); if (docValueFields != null) { - out.writeList(docValueFields); + out.writeCollection(docValueFields); } out.writeOptionalWriteable(storedFieldsContext); out.writeVInt(from); out.writeOptionalWriteable(highlightBuilder); - out.writeList(indexBoosts); + out.writeCollection(indexBoosts); out.writeOptionalFloat(minScore); out.writeOptionalNamedWriteable(postQueryBuilder); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_013)) { - out.writeList(subSearchSourceBuilders); - } else if (out.getTransportVersion().before(TransportVersion.V_8_4_0) && subSearchSourceBuilders.size() >= 2) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + out.writeCollection(subSearchSourceBuilders); + } else if (out.getTransportVersion().before(TransportVersions.V_8_4_0) && subSearchSourceBuilders.size() >= 2) { throw new IllegalArgumentException("cannot serialize [sub_searches] to version [" + out.getTransportVersion() + "]"); } else { out.writeOptionalNamedWriteable(query()); @@ -300,18 +300,18 @@ public void writeTo(StreamOutput out) throws IOException { boolean hasRescoreBuilders = rescoreBuilders != null; out.writeBoolean(hasRescoreBuilders); if (hasRescoreBuilders) { - out.writeNamedWriteableList(rescoreBuilders); + out.writeNamedWriteableCollection(rescoreBuilders); } boolean hasScriptFields = scriptFields != null; out.writeBoolean(hasScriptFields); if (hasScriptFields) { - out.writeList(scriptFields); + out.writeCollection(scriptFields); } out.writeVInt(size); boolean hasSorts = sorts != null; out.writeBoolean(hasSorts); if (hasSorts) { - out.writeNamedWriteableList(sorts); + out.writeNamedWriteableCollection(sorts); } boolean hasStats = stats != null; out.writeBoolean(hasStats); @@ -324,20 +324,20 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(trackScores); out.writeOptionalBoolean(version); out.writeOptionalBoolean(seqNoAndPrimaryTerm); - out.writeNamedWriteableList(extBuilders); + out.writeNamedWriteableCollection(extBuilders); out.writeBoolean(profile); out.writeOptionalWriteable(searchAfterBuilder); out.writeOptionalWriteable(sliceBuilder); 
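
Around this point SearchSourceBuilder's stream constructor and writeTo mirror each other field by field: every nullable collection is preceded by a boolean presence flag (hasRescoreBuilders, hasScriptFields, hasSorts), and the reader consumes the flag before deciding whether a list follows. A minimal sketch of one such matched pair, again with java.io stand-ins for StreamOutput/StreamInput:

```java
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

final class OptionalListIo {
    static void writeOptionalStrings(DataOutput out, List<String> list) throws IOException {
        out.writeBoolean(list != null); // presence flag always comes first
        if (list != null) {
            out.writeInt(list.size());
            for (String s : list) {
                out.writeUTF(s);
            }
        }
    }

    static List<String> readOptionalStrings(DataInput in) throws IOException {
        if (in.readBoolean() == false) {
            return null; // the flag says nothing follows
        }
        int size = in.readInt();
        List<String> list = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            list.add(in.readUTF());
        }
        return list;
    }
}
```

This flag-then-payload layout is why the readList/writeList renames in this diff are purely mechanical: the bytes on the wire stay identical.
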
out.writeOptionalWriteable(collapse); out.writeOptionalInt(trackTotalHitsUpTo); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { out.writeBoolean(fetchFields != null); if (fetchFields != null) { - out.writeList(fetchFields); + out.writeCollection(fetchFields); } out.writeOptionalWriteable(pointInTimeBuilder); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) { out.writeGenericMap(runtimeMappings); } else { if (false == runtimeMappings.isEmpty()) { @@ -346,8 +346,8 @@ public void writeTo(StreamOutput out) throws IOException { ); } } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { - if (out.getTransportVersion().before(TransportVersion.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_7_0)) { if (knnSearch.size() > 1) { throw new IllegalArgumentException( "Versions before 8070099 don't support multiple [knn] search clauses and search was sent to [" @@ -360,7 +360,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(knnSearch); } } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeOptionalNamedWriteable(rankBuilder); } else if (rankBuilder != null) { throw new IllegalArgumentException("cannot serialize [rank] to version [" + out.getTransportVersion() + "]"); @@ -2095,6 +2095,14 @@ public String toString(Params params) { } public boolean supportsParallelCollection() { + if (profile) return false; + + if (sorts != null) { + for (SortBuilder<?> sortBuilder : sorts) { + if (sortBuilder.supportsParallelCollection() == false) return false; + } + } + return collapse == null && (aggregations == null || aggregations.supportsParallelCollection()); } } diff --git a/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java b/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java index 03882eba07c41..049e06b0d98c7 100644 --- a/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java +++ b/server/src/main/java/org/elasticsearch/search/collapse/CollapseBuilder.java @@ -78,14 +78,14 @@ public CollapseBuilder(String field) { public CollapseBuilder(StreamInput in) throws IOException { this.field = in.readString(); this.maxConcurrentGroupRequests = in.readVInt(); - this.innerHits = in.readList(InnerHitBuilder::new); + this.innerHits = in.readCollectionAsList(InnerHitBuilder::new); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(field); out.writeVInt(maxConcurrentGroupRequests); - out.writeList(innerHits); + out.writeCollection(innerHits); } public static CollapseBuilder fromXContent(XContentParser parser) { diff --git a/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java b/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java index 9c12bf3a4e497..1a7b7b616021f 100644 --- a/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java +++ b/server/src/main/java/org/elasticsearch/search/dfs/DfsSearchResult.java @@ -12,7 +12,7 @@ import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.TermStatistics; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.TransportVersion; +import
org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.search.SearchPhaseResult;
@@ -53,18 +53,18 @@ public DfsSearchResult(StreamInput in) throws IOException {
         fieldStatistics = readFieldStats(in);
         maxDoc = in.readVInt();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) {
             setShardSearchRequest(in.readOptionalWriteable(ShardSearchRequest::new));
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
-            if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) {
-                knnResults = in.readOptionalList(DfsKnnResults::new);
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) {
+                knnResults = in.readOptionalCollectionAsList(DfsKnnResults::new);
             } else {
                 DfsKnnResults results = in.readOptionalWriteable(DfsKnnResults::new);
                 knnResults = results != null ? List.of(results) : List.of();
             }
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) {
             searchProfileDfsPhaseResult = in.readOptionalWriteable(SearchProfileDfsPhaseResult::new);
         }
     }
@@ -135,11 +135,11 @@ public void writeTo(StreamOutput out) throws IOException {
         writeTermStats(out, termStatistics);
         writeFieldStats(out, fieldStatistics);
         out.writeVInt(maxDoc);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) {
             out.writeOptionalWriteable(getShardSearchRequest());
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
-            if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) {
                 out.writeOptionalCollection(knnResults);
             } else {
                 if (knnResults != null && knnResults.size() > 1) {
@@ -152,13 +152,13 @@ public void writeTo(StreamOutput out) throws IOException {
                 out.writeOptionalWriteable(knnResults == null || knnResults.isEmpty() ? null : knnResults.get(0));
             }
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) {
             out.writeOptionalWriteable(searchProfileDfsPhaseResult);
         }
     }
 
     public static void writeFieldStats(StreamOutput out, Map<String, CollectionStatistics> fieldStatistics) throws IOException {
-        out.writeMap(fieldStatistics, StreamOutput::writeString, (o, statistics) -> {
+        out.writeMap(fieldStatistics, (o, statistics) -> {
             assert statistics.maxDoc() >= 0;
             o.writeVLong(statistics.maxDoc());
             // stats are always positive numbers
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java b/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java
index f5bef5ea5f2a4..c25c3575a8c4b 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/FetchSearchResult.java
@@ -8,7 +8,7 @@
 
 package org.elasticsearch.search.fetch;
 
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.search.SearchHit;
@@ -39,7 +39,7 @@ public FetchSearchResult(StreamInput in) throws IOException {
         super(in);
         contextId = new ShardSearchContextId(in);
         hits = new SearchHits(in);
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) {
             profileResult = in.readOptionalWriteable(ProfileResult::new);
         } else {
             profileResult = null;
@@ -50,7 +50,7 @@ public FetchSearchResult(StreamInput in) throws IOException {
     public void writeTo(StreamOutput out) throws IOException {
         contextId.writeTo(out);
         hits.writeTo(out);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) {
             out.writeOptionalWriteable(profileResult);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchSearchRequest.java b/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchSearchRequest.java
index 1cd6a5d712469..e6fc229e1e648 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchSearchRequest.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/ShardFetchSearchRequest.java
@@ -9,7 +9,7 @@
 package org.elasticsearch.search.fetch;
 
 import org.apache.lucene.search.ScoreDoc;
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.OriginalIndices;
 import org.elasticsearch.action.support.IndicesOptions;
@@ -53,7 +53,7 @@ public ShardFetchSearchRequest(
     public ShardFetchSearchRequest(StreamInput in) throws IOException {
         super(in);
         originalIndices = OriginalIndices.readOriginalIndices(in);
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) {
             shardSearchRequest = in.readOptionalWriteable(ShardSearchRequest::new);
             rescoreDocIds = new RescoreDocIds(in);
             aggregatedDfs = in.readOptionalWriteable(AggregatedDfs::new);
@@ -68,7 +68,7 @@ public ShardFetchSearchRequest(StreamInput in) throws IOException {
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         OriginalIndices.writeOriginalIndices(originalIndices, out);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) {
             out.writeOptionalWriteable(shardSearchRequest);
             rescoreDocIds.writeTo(out);
             out.writeOptionalWriteable(aggregatedDfs);
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FieldAndFormat.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FieldAndFormat.java
index 3fb9d8a1930c1..82b16a9aad03a 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/FieldAndFormat.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/FieldAndFormat.java
@@ -8,7 +8,7 @@
 
 package org.elasticsearch.search.fetch.subphase;
 
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -133,7 +133,7 @@ public FieldAndFormat(String field, @Nullable String format, @Nullable Boolean i
     public FieldAndFormat(StreamInput in) throws IOException {
         this.field = in.readString();
         format = in.readOptionalString();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) {
             this.includeUnmapped = in.readOptionalBoolean();
         } else {
             this.includeUnmapped = null;
@@ -144,7 +144,7 @@ public FieldAndFormat(StreamInput in) throws IOException {
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(field);
         out.writeOptionalString(format);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) {
             out.writeOptionalBoolean(this.includeUnmapped);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/LookupField.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/LookupField.java
index e106a174e83e0..cf8287fed2d25 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/LookupField.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/LookupField.java
@@ -28,7 +28,7 @@ public record LookupField(String targetIndex, QueryBuilder query, List<FieldAndFormat> fetchFields, int size) implements Writeable {
 
     public LookupField(StreamInput in) throws IOException {
-        this(in.readString(), in.readNamedWriteable(QueryBuilder.class), in.readList(FieldAndFormat::new), in.readVInt());
+        this(in.readString(), in.readNamedWriteable(QueryBuilder.class), in.readCollectionAsList(FieldAndFormat::new), in.readVInt());
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java
index b872197163c65..3ce5290b12100 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/AbstractHighlighterBuilder.java
@@ -10,7 +10,7 @@
 
 import org.apache.lucene.search.highlight.SimpleFragmenter;
 import org.apache.lucene.search.highlight.SimpleSpanFragmenter;
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -143,7 +143,7 @@ protected AbstractHighlighterBuilder(StreamInput in) throws IOException {
         }
         order(in.readOptionalWriteable(Order::readFromStream));
         highlightFilter(in.readOptionalBoolean());
-        if (in.getTransportVersion().before(TransportVersion.V_8_8_0)) {
+        if (in.getTransportVersion().before(TransportVersions.V_8_8_0)) {
             in.readOptionalBoolean();   // force_source, now deprecated
         }
         boundaryScannerType(in.readOptionalWriteable(BoundaryScannerType::readFromStream));
@@ -160,7 +160,7 @@ protected AbstractHighlighterBuilder(StreamInput in) throws IOException {
             options(in.readMap());
         }
         requireFieldMatch(in.readOptionalBoolean());
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_12_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_12_0)) {
            maxAnalyzedOffset(in.readOptionalInt());
         }
     }
@@ -183,7 +183,7 @@ public final void writeTo(StreamOutput out) throws IOException {
         }
         out.writeOptionalWriteable(order);
         out.writeOptionalBoolean(highlightFilter);
-        if (out.getTransportVersion().before(TransportVersion.V_8_8_0)) {
+        if (out.getTransportVersion().before(TransportVersions.V_8_8_0)) {
             out.writeOptionalBoolean(false);
         }
         out.writeOptionalWriteable(boundaryScannerType);
@@ -206,7 +206,7 @@ public final void writeTo(StreamOutput out) throws IOException {
             out.writeGenericMap(options);
         }
         out.writeOptionalBoolean(requireFieldMatch);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_12_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_12_0)) {
             out.writeOptionalInt(maxAnalyzedOffset);
         }
         doWriteTo(out);
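The hunks above all apply the same mechanical migration: version constants move from `TransportVersion` to the new `TransportVersions` holder class, and the list helpers are renamed (`readList` to `readCollectionAsList`, `readOptionalList` to `readOptionalCollectionAsList`, and so on). A minimal sketch of the resulting read/write pattern, using a hypothetical `ExampleResult` class; only the stream helpers and version constants are taken from the diff:

```java
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.search.dfs.DfsKnnResults;

import java.io.IOException;
import java.util.List;

// Hypothetical Writeable mirroring the DfsSearchResult hunks above.
public class ExampleResult implements Writeable {
    private final List<DfsKnnResults> knnResults;

    public ExampleResult(StreamInput in) throws IOException {
        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) {
            // 8.7.0+ nodes send a full, possibly-null list
            knnResults = in.readOptionalCollectionAsList(DfsKnnResults::new);
        } else {
            // older nodes send at most a single optional entry
            DfsKnnResults single = in.readOptionalWriteable(DfsKnnResults::new);
            knnResults = single != null ? List.of(single) : List.of();
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) {
            out.writeOptionalCollection(knnResults);
        } else {
            out.writeOptionalWriteable(knnResults.isEmpty() ? null : knnResults.get(0));
        }
    }
}
```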
diff --git a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java
index bd0b3770f9f49..721a84efcd751 100644
--- a/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/fetch/subphase/highlight/HighlightBuilder.java
@@ -128,7 +128,7 @@ public HighlightBuilder(StreamInput in) throws IOException {
         super(in);
         encoder(in.readOptionalString());
         useExplicitFieldOrder(in.readBoolean());
-        this.fields = in.readList(Field::new);
+        this.fields = in.readCollectionAsList(Field::new);
         assert this.equals(new HighlightBuilder(this, highlightQuery, fields)) : "copy constructor is broken";
     }
 
@@ -136,7 +136,7 @@ public HighlightBuilder(StreamInput in) throws IOException {
     protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeOptionalString(encoder);
         out.writeBoolean(useExplicitFieldOrder);
-        out.writeList(fields);
+        out.writeCollection(fields);
     }
 
     /**
diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchContextId.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchContextId.java
index 29dccd06605f5..d4888e79acae2 100644
--- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchContextId.java
+++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchContextId.java
@@ -8,7 +8,7 @@
 
 package org.elasticsearch.search.internal;
 
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -34,12 +34,12 @@ public ShardSearchContextId(String sessionId, long id, String searcherId) {
 
     public ShardSearchContextId(StreamInput in) throws IOException {
         this.id = in.readLong();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_7_0)) {
             this.sessionId = in.readString();
         } else {
             this.sessionId = "";
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_12_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_12_0)) {
             this.searcherId = in.readOptionalString();
         } else {
             this.searcherId = null;
@@ -49,10 +49,10 @@ public ShardSearchContextId(StreamInput in) throws IOException {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeLong(id);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_7_0)) {
             out.writeString(sessionId);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_12_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_12_0)) {
             out.writeOptionalString(searcherId);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java
index d8e9bd79312c1..842387c0ea13b 100644
--- a/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java
+++ b/server/src/main/java/org/elasticsearch/search/internal/ShardSearchRequest.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.search.internal;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.OriginalIndices;
 import org.elasticsearch.action.search.SearchRequest;
@@ -273,15 +274,16 @@ public ShardSearchRequest(StreamInput in) throws IOException {
         super(in);
         shardId = new ShardId(in);
         searchType = SearchType.fromId(in.readByte());
-        shardRequestIndex = in.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0) ? in.readVInt() : -1;
+        shardRequestIndex = in.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0) ? in.readVInt() : -1;
         numberOfShards = in.readVInt();
         scroll = in.readOptionalWriteable(Scroll::new);
         source = in.readOptionalWriteable(SearchSourceBuilder::new);
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0) && in.getTransportVersion().before(TransportVersion.V_8_500_013)) {
-            // to deserialize between the 8.8 and 8.500.013 version we need to translate
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)
+            && in.getTransportVersion().before(TransportVersions.V_8_500_020)) {
+            // to deserialize between the 8.8 and 8.500.020 version we need to translate
             // the rank queries into sub searches if we are ranking; if there are no rank queries
             // we deserialize the empty list and do nothing
-            List<QueryBuilder> rankQueryBuilders = in.readNamedWriteableList(QueryBuilder.class);
+            List<QueryBuilder> rankQueryBuilders = in.readNamedWriteableCollectionAsList(QueryBuilder.class);
             // if we are in the dfs phase in 8.8, we can have no rank queries
             // and if we are in the query/fetch phase we can have either no rank queries
             // for a standard query or hybrid search or 2+ rank queries, but we cannot have
@@ -300,7 +302,7 @@ public ShardSearchRequest(StreamInput in) throws IOException {
                 source.subSearches(subSearchSourceBuilders);
             }
         }
-        if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+        if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) {
             // types no longer relevant so ignore
             String[] types = in.readStringArray();
             if (types.length > 0) {
@@ -315,11 +317,11 @@ public ShardSearchRequest(StreamInput in) throws IOException {
         requestCache = in.readOptionalBoolean();
         clusterAlias = in.readOptionalString();
         allowPartialSearchResults = in.readBoolean();
-        if (in.getTransportVersion().before(TransportVersion.V_7_11_0)) {
+        if (in.getTransportVersion().before(TransportVersions.V_7_11_0)) {
             in.readStringArray();
             in.readOptionalString();
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_7_0)) {
             canReturnNullResponseIfMatchNoDocs = in.readBoolean();
             bottomSortValues = in.readOptionalWriteable(SearchSortValuesAndFormats::new);
             readerId = in.readOptionalWriteable(ShardSearchContextId::new);
@@ -332,14 +334,14 @@ public ShardSearchRequest(StreamInput in) throws IOException {
         }
         assert keepAlive == null || readerId != null : "readerId: " + readerId + " keepAlive: " + keepAlive;
         channelVersion = TransportVersion.min(TransportVersion.readVersion(in), in.getTransportVersion());
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) {
             waitForCheckpoint = in.readLong();
             waitForCheckpointsTimeout = in.readTimeValue();
         } else {
             waitForCheckpoint = SequenceNumbers.UNASSIGNED_SEQ_NO;
             waitForCheckpointsTimeout = SearchService.NO_TIMEOUT;
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
             forceSyntheticSource = in.readBoolean();
         } else {
             /*
@@ -363,28 +365,28 @@ protected final void innerWriteTo(StreamOutput out, boolean asKey) throws IOExce
         shardId.writeTo(out);
         out.writeByte(searchType.id());
         if (asKey == false) {
-            if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) {
                 out.writeVInt(shardRequestIndex);
             }
             out.writeVInt(numberOfShards);
         }
         out.writeOptionalWriteable(scroll);
         out.writeOptionalWriteable(source);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)
-            && out.getTransportVersion().before(TransportVersion.V_8_500_013)) {
-            // to serialize between the 8.8 and 8.500.013 version we need to translate
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)
+            && out.getTransportVersion().before(TransportVersions.V_8_500_020)) {
+            // to serialize between the 8.8 and 8.500.020 version we need to translate
             // the sub searches into rank queries if we are ranking, otherwise, we
             // ignore this because linear combination will have multiple sub searches in
-            // 8.500.013+, but only use the combined boolean query in prior versions
+            // 8.500.020+, but only use the combined boolean query in prior versions
             List<QueryBuilder> rankQueryBuilders = new ArrayList<>();
             if (source != null && source.rankBuilder() != null && source.subSearches().size() >= 2) {
                 for (SubSearchSourceBuilder subSearchSourceBuilder : source.subSearches()) {
                     rankQueryBuilders.add(subSearchSourceBuilder.getQueryBuilder());
                 }
             }
-            out.writeNamedWriteableList(rankQueryBuilders);
+            out.writeNamedWriteableCollection(rankQueryBuilders);
         }
-        if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+        if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) {
             // types not supported so send an empty array to previous versions
             out.writeStringArray(Strings.EMPTY_ARRAY);
         }
@@ -396,18 +398,18 @@ protected final void innerWriteTo(StreamOutput out, boolean asKey) throws IOExce
         out.writeOptionalBoolean(requestCache);
         out.writeOptionalString(clusterAlias);
         out.writeBoolean(allowPartialSearchResults);
-        if (asKey == false && out.getTransportVersion().before(TransportVersion.V_7_11_0)) {
+        if (asKey == false && out.getTransportVersion().before(TransportVersions.V_7_11_0)) {
             out.writeStringArray(Strings.EMPTY_ARRAY);
             out.writeOptionalString(null);
         }
-        if (asKey == false && out.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) {
+        if (asKey == false && out.getTransportVersion().onOrAfter(TransportVersions.V_7_7_0)) {
             out.writeBoolean(canReturnNullResponseIfMatchNoDocs);
             out.writeOptionalWriteable(bottomSortValues);
             out.writeOptionalWriteable(readerId);
             out.writeOptionalTimeValue(keepAlive);
         }
         TransportVersion.writeVersion(channelVersion, out);
-        TransportVersion waitForCheckpointsVersion = TransportVersion.V_7_16_0;
+        TransportVersion waitForCheckpointsVersion = TransportVersions.V_7_16_0;
         if (out.getTransportVersion().onOrAfter(waitForCheckpointsVersion)) {
             out.writeLong(waitForCheckpoint);
             out.writeTimeValue(waitForCheckpointsTimeout);
@@ -421,7 +423,7 @@ protected final void innerWriteTo(StreamOutput out, boolean asKey) throws IOExce
                     + "] or greater."
             );
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
             out.writeBoolean(forceSyntheticSource);
         } else {
             if (forceSyntheticSource) {
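The `ShardSearchRequest` change above narrows the rank-query translation to a bounded window of wire versions, combining `onOrAfter` and `before`. A hedged sketch of that gate; the class and method names are illustrative, while the constants come from the hunk:

```java
import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions;

class RankQueryCompat {
    // True only for wire formats in [V_8_8_0, V_8_500_020), the range that still
    // serializes sub searches as a flat list of rank queries.
    static boolean needsRankQueryTranslation(TransportVersion wireVersion) {
        return wireVersion.onOrAfter(TransportVersions.V_8_8_0) && wireVersion.before(TransportVersions.V_8_500_020);
    }
}
```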
diff --git a/server/src/main/java/org/elasticsearch/search/profile/ProfileResult.java b/server/src/main/java/org/elasticsearch/search/profile/ProfileResult.java
index e34cc1a1bcd77..d2cd46bda315e 100644
--- a/server/src/main/java/org/elasticsearch/search/profile/ProfileResult.java
+++ b/server/src/main/java/org/elasticsearch/search/profile/ProfileResult.java
@@ -8,7 +8,7 @@
 
 package org.elasticsearch.search.profile;
 
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -75,12 +75,12 @@ public ProfileResult(StreamInput in) throws IOException {
         this.description = in.readString();
         this.nodeTime = in.readLong();
         breakdown = in.readMap(StreamInput::readLong);
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_9_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) {
             debug = in.readMap(StreamInput::readGenericValue);
         } else {
             debug = Map.of();
         }
-        children = in.readList(ProfileResult::new);
+        children = in.readCollectionAsList(ProfileResult::new);
     }
 
     @Override
@@ -88,11 +88,11 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeString(type);
         out.writeString(description);
         out.writeLong(nodeTime); // not Vlong because can be negative
-        out.writeMap(breakdown, StreamOutput::writeString, StreamOutput::writeLong);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_9_0)) {
-            out.writeMap(debug, StreamOutput::writeString, StreamOutput::writeGenericValue);
+        out.writeMap(breakdown, StreamOutput::writeLong);
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) {
+            out.writeMap(debug, StreamOutput::writeGenericValue);
         }
-        out.writeList(children);
+        out.writeCollection(children);
     }
 
     /**
diff --git a/server/src/main/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResult.java b/server/src/main/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResult.java
index 4d3205f185a60..4e301d5a3300d 100644
--- a/server/src/main/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResult.java
+++ b/server/src/main/java/org/elasticsearch/search/profile/SearchProfileDfsPhaseResult.java
@@ -8,7 +8,7 @@
 
 package org.elasticsearch.search.profile;
 
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -45,8 +45,8 @@ public SearchProfileDfsPhaseResult(
 
     public SearchProfileDfsPhaseResult(StreamInput in) throws IOException {
         dfsShardResult = in.readOptionalWriteable(ProfileResult::new);
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) {
-            queryProfileShardResult = in.readOptionalList(QueryProfileShardResult::new);
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) {
+            queryProfileShardResult = in.readOptionalCollectionAsList(QueryProfileShardResult::new);
         } else {
             QueryProfileShardResult singleResult = in.readOptionalWriteable(QueryProfileShardResult::new);
             queryProfileShardResult = singleResult != null ? List.of(singleResult) : null;
@@ -56,7 +56,7 @@ public SearchProfileDfsPhaseResult(StreamInput in) throws IOException {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalWriteable(dfsShardResult);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) {
             out.writeOptionalCollection(queryProfileShardResult);
         } else {
             out.writeOptionalWriteable(combineQueryProfileShardResults());
diff --git a/server/src/main/java/org/elasticsearch/search/profile/SearchProfileQueryPhaseResult.java b/server/src/main/java/org/elasticsearch/search/profile/SearchProfileQueryPhaseResult.java
index 04855716fe30a..6000147abbade 100644
--- a/server/src/main/java/org/elasticsearch/search/profile/SearchProfileQueryPhaseResult.java
+++ b/server/src/main/java/org/elasticsearch/search/profile/SearchProfileQueryPhaseResult.java
@@ -8,7 +8,7 @@
 
 package org.elasticsearch.search.profile;
 
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -42,7 +42,7 @@ public SearchProfileQueryPhaseResult(
     }
 
     public SearchProfileQueryPhaseResult(StreamInput in) throws IOException {
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) {
             searchProfileDfsPhaseResult = in.readOptionalWriteable(SearchProfileDfsPhaseResult::new);
         }
         int profileSize = in.readVInt();
@@ -57,7 +57,7 @@ public SearchProfileQueryPhaseResult(StreamInput in) throws IOException {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) {
             out.writeOptionalWriteable(searchProfileDfsPhaseResult);
         }
         out.writeVInt(queryProfileResults.size());
diff --git a/server/src/main/java/org/elasticsearch/search/profile/SearchProfileResults.java b/server/src/main/java/org/elasticsearch/search/profile/SearchProfileResults.java
index bec08e0075c6d..d6975811091e3 100644
--- a/server/src/main/java/org/elasticsearch/search/profile/SearchProfileResults.java
+++ b/server/src/main/java/org/elasticsearch/search/profile/SearchProfileResults.java
@@ -10,7 +10,7 @@
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -56,7 +56,7 @@ public SearchProfileResults(Map<String, SearchProfileShardResult> shardResults)
     }
 
     public SearchProfileResults(StreamInput in) throws IOException {
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) {
             shardResults = in.readMap(SearchProfileShardResult::new);
         } else {
             // Before 8.0.0 we only send the query phase result
@@ -66,11 +66,11 @@ public SearchProfileResults(StreamInput in) throws IOException {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) {
-            out.writeMap(shardResults, StreamOutput::writeString, (o, r) -> r.writeTo(o));
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) {
+            out.writeMap(shardResults, StreamOutput::writeWriteable);
         } else {
             // Before 8.0.0 we only send the query phase
-            out.writeMap(shardResults, StreamOutput::writeString, (o, r) -> r.getQueryPhase().writeTo(o));
+            out.writeMap(shardResults, (o, r) -> r.getQueryPhase().writeTo(o));
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/search/profile/query/CollectorResult.java b/server/src/main/java/org/elasticsearch/search/profile/query/CollectorResult.java
index f9f85749bbd52..3c12b68196b35 100644
--- a/server/src/main/java/org/elasticsearch/search/profile/query/CollectorResult.java
+++ b/server/src/main/java/org/elasticsearch/search/profile/query/CollectorResult.java
@@ -56,7 +56,7 @@ public CollectorResult(String collectorName, String reason, long time, List
> {
     public String toString() {
         return Strings.toString(this, true, true);
     }
+
+    public boolean supportsParallelCollection() {
+        return true;
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/search/sort/SortValue.java b/server/src/main/java/org/elasticsearch/search/sort/SortValue.java
index 6c3ac59da6d45..067439931a85b 100644
--- a/server/src/main/java/org/elasticsearch/search/sort/SortValue.java
+++ b/server/src/main/java/org/elasticsearch/search/sort/SortValue.java
@@ -9,7 +9,7 @@
 package org.elasticsearch.search.sort;
 
 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.NamedWriteable;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -299,7 +299,7 @@ private BytesSortValue(StreamInput in) throws IOException {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getTransportVersion().before(TransportVersion.V_7_11_0)) {
+        if (out.getTransportVersion().before(TransportVersions.V_7_11_0)) {
             throw new IllegalArgumentException(
                 "transport versions before [7110099] can't handle non-numeric sort values, attempted to send to ["
                     + out.getTransportVersion()
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java b/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java
index 4dad6f779375a..cfa568d67cefc 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/Suggest.java
@@ -72,7 +72,7 @@ public Suggest(List<Suggestion<? extends Entry<? extends Option>>> suggestions)
 
     @SuppressWarnings({ "rawtypes", "unchecked" })
     public Suggest(StreamInput in) throws IOException {
-        suggestions = (List) in.readNamedWriteableList(Suggestion.class);
+        suggestions = (List) in.readNamedWriteableCollectionAsList(Suggestion.class);
         hasScoreDocs = filter(CompletionSuggestion.class).stream().anyMatch(CompletionSuggestion::hasScoreDocs);
     }
 
@@ -112,7 +112,7 @@ public boolean hasScoreDocs() {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeNamedWriteableList(suggestions);
+        out.writeNamedWriteableCollection(suggestions);
     }
 
     @Override
@@ -317,7 +317,7 @@ public void trim() {
         public void writeTo(StreamOutput out) throws IOException {
             out.writeString(name);
             out.writeVInt(size);
-            out.writeList(entries);
+            out.writeCollection(entries);
         }
 
         @Override
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java
index 6d22de25279cf..86e18b3e5a406 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/completion/CompletionSuggestionBuilder.java
@@ -9,6 +9,7 @@
 
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -328,7 +329,7 @@ public String getWriteableName() {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.ZERO;
+        return TransportVersions.ZERO;
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java
index 61f81f77add06..fe85dd70b7337 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/Laplace.java
@@ -12,6 +12,7 @@
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.search.suggest.phrase.WordScorer.WordScorerFactory;
@@ -121,6 +122,6 @@ public WordScorerFactory buildWordScorerFactory() {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.ZERO;
+        return TransportVersions.ZERO;
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java
index 27f111fb6d827..9dff9561b9934 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/LinearInterpolation.java
@@ -12,6 +12,7 @@
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -178,6 +179,6 @@ public WordScorerFactory buildWordScorerFactory() {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.ZERO;
+        return TransportVersions.ZERO;
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java
index c2252023fde96..23c6d9db0ce2f 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/PhraseSuggestionBuilder.java
@@ -10,6 +10,7 @@
 import org.apache.lucene.analysis.Analyzer;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -161,7 +162,7 @@ public void doWriteTo(StreamOutput out) throws IOException {
         }
         out.writeMapWithConsistentOrder(collateParams);
         out.writeOptionalBoolean(collatePrune);
-        out.writeMap(this.generators, StreamOutput::writeString, StreamOutput::writeList);
+        out.writeMap(this.generators, StreamOutput::writeCollection);
     }
 
     /**
@@ -699,7 +700,7 @@ public String getWriteableName() {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.ZERO;
+        return TransportVersions.ZERO;
     }
 
     @Override
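Several hunks above also move to the two-argument `writeMap` overload, which assumes `String` keys, and to `writeCollection` in place of `writeList`. A small sketch under those assumptions; the wrapper class and field names are illustrative only:

```java
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.profile.ProfileResult;

import java.io.IOException;
import java.util.List;
import java.util.Map;

class WriteMapExample {
    static void writeExample(StreamOutput out, Map<String, Long> breakdown, List<ProfileResult> children) throws IOException {
        // was: out.writeMap(breakdown, StreamOutput::writeString, StreamOutput::writeLong)
        out.writeMap(breakdown, StreamOutput::writeLong);
        // was: out.writeList(children)
        out.writeCollection(children);
    }
}
```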
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoff.java b/server/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoff.java
index 0c18cc67f759e..fe2c2b660eb72 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoff.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/phrase/StupidBackoff.java
@@ -12,6 +12,7 @@
 import org.apache.lucene.index.Terms;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.search.suggest.phrase.WordScorer.WordScorerFactory;
@@ -124,6 +125,6 @@ public WordScorerFactory buildWordScorerFactory() {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.ZERO;
+        return TransportVersions.ZERO;
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java b/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java
index e5c0915531646..8e28c65b8ca64 100644
--- a/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/suggest/term/TermSuggestionBuilder.java
@@ -16,6 +16,7 @@
 import org.apache.lucene.search.spell.StringDistance;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -459,7 +460,7 @@ public String getWriteableName() {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.ZERO;
+        return TransportVersions.ZERO;
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java
index 3825302fbc71e..4ecb33a907f1e 100644
--- a/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnScoreDocQueryBuilder.java
@@ -12,6 +12,7 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.ScoreDoc;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.lucene.Lucene;
@@ -148,6 +149,6 @@ protected int doHashCode() {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_8_4_0;
+        return TransportVersions.V_8_4_0;
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnSearchBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnSearchBuilder.java
index 37a56d05c2786..5ebea733f0e5e 100644
--- a/server/src/main/java/org/elasticsearch/search/vectors/KnnSearchBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnSearchBuilder.java
@@ -9,7 +9,7 @@
 package org.elasticsearch.search.vectors;
 
 import org.apache.lucene.util.SetOnce;
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -206,15 +206,15 @@ public KnnSearchBuilder(StreamInput in) throws IOException {
         this.k = in.readVInt();
         this.numCands = in.readVInt();
         this.queryVector = in.readFloatArray();
-        this.filterQueries = in.readNamedWriteableList(QueryBuilder.class);
+        this.filterQueries = in.readNamedWriteableCollectionAsList(QueryBuilder.class);
         this.boost = in.readFloat();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) {
             this.queryVectorBuilder = in.readOptionalNamedWriteable(QueryVectorBuilder.class);
         } else {
             this.queryVectorBuilder = null;
         }
         this.querySupplier = null;
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
             this.similarity = in.readOptionalFloat();
         } else {
             this.similarity = null;
@@ -375,9 +375,9 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeVInt(k);
         out.writeVInt(numCands);
         out.writeFloatArray(queryVector);
-        out.writeNamedWriteableList(filterQueries);
+        out.writeNamedWriteableCollection(filterQueries);
         out.writeFloat(boost);
-        if (out.getTransportVersion().before(TransportVersion.V_8_7_0) && queryVectorBuilder != null) {
+        if (out.getTransportVersion().before(TransportVersions.V_8_7_0) && queryVectorBuilder != null) {
             throw new IllegalArgumentException(
                 format(
                     "cannot serialize [%s] to older node of version [%s]",
@@ -386,10 +386,10 @@ public void writeTo(StreamOutput out) throws IOException {
                 )
             );
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) {
             out.writeOptionalNamedWriteable(queryVectorBuilder);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
             out.writeOptionalFloat(similarity);
         }
     }
diff --git a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java
index 33cc679757bcc..c4b1f3046e89d 100644
--- a/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java
+++ b/server/src/main/java/org/elasticsearch/search/vectors/KnnVectorQueryBuilder.java
@@ -12,6 +12,7 @@
 import org.apache.lucene.search.BooleanQuery;
 import org.apache.lucene.search.Query;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.core.Nullable;
@@ -80,19 +81,19 @@ public KnnVectorQueryBuilder(StreamInput in) throws IOException {
         super(in);
         this.fieldName = in.readString();
         this.numCands = in.readVInt();
-        if (in.getTransportVersion().before(TransportVersion.V_8_7_0)) {
+        if (in.getTransportVersion().before(TransportVersions.V_8_7_0)) {
             this.queryVector = in.readFloatArray();
             this.byteQueryVector = null;
         } else {
             this.queryVector = in.readBoolean() ? in.readFloatArray() : null;
             this.byteQueryVector = in.readBoolean() ? in.readByteArray() : null;
         }
-        if (in.getTransportVersion().before(TransportVersion.V_8_2_0)) {
+        if (in.getTransportVersion().before(TransportVersions.V_8_2_0)) {
             this.filterQueries = new ArrayList<>();
         } else {
             this.filterQueries = readQueries(in);
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
             this.vectorSimilarity = in.readOptionalFloat();
         } else {
             this.vectorSimilarity = null;
@@ -142,7 +143,7 @@ public KnnVectorQueryBuilder addFilterQueries(List<QueryBuilder> filterQueries)
     protected void doWriteTo(StreamOutput out) throws IOException {
         out.writeString(fieldName);
         out.writeVInt(numCands);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) {
             boolean queryVectorNotNull = queryVector != null;
             out.writeBoolean(queryVectorNotNull);
             if (queryVectorNotNull) {
@@ -165,10 +166,10 @@ protected void doWriteTo(StreamOutput out) throws IOException {
             }
             out.writeFloatArray(f);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) {
             writeQueries(out, filterQueries);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
             out.writeOptionalFloat(vectorSimilarity);
         }
     }
@@ -270,6 +271,6 @@ protected boolean doEquals(KnnVectorQueryBuilder other) {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_8_0_0;
+        return TransportVersions.V_8_0_0;
     }
 }
diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java
index 2356181a0c426..58ccb4a82ac7d 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreInfo.java
@@ -39,7 +39,7 @@ public RestoreInfo(String name, List<String> indices, int totalShards, int succe
 
     public RestoreInfo(StreamInput in) throws IOException {
         name = in.readString();
-        indices = in.readImmutableStringList();
+        indices = in.readStringCollectionAsImmutableList();
         totalShards = in.readVInt();
         successfulShards = in.readVInt();
     }
diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java
index 335d69c83a4f5..589163ab00581 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java
@@ -10,7 +10,6 @@
 import org.apache.logging.log4j.Level;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
-import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest;
 import org.elasticsearch.action.support.IndicesOptions;
@@ -970,10 +969,10 @@ static void validateSnapshotRestorable(
         if (IndexVersion.current().before(snapshotInfo.version())) {
             throw new SnapshotRestoreException(
                 new Snapshot(repository.name(), snapshotInfo.snapshotId()),
-                "the snapshot was created with Elasticsearch version ["
+                "the snapshot was created with index version ["
                     + snapshotInfo.version()
-                    + "] which is higher than the version of this node ["
-                    + Version.CURRENT
+                    + "] which is higher than the version used by this node ["
+                    + IndexVersion.current()
                     + "]"
             );
         }
diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotFeatureInfo.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotFeatureInfo.java
index edfe511002c04..7e0e495d193fc 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotFeatureInfo.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotFeatureInfo.java
@@ -48,7 +48,7 @@ public SnapshotFeatureInfo(String pluginName, List<String> indices) {
     }
 
     public SnapshotFeatureInfo(final StreamInput in) throws IOException {
-        this(in.readString(), in.readImmutableStringList());
+        this(in.readString(), in.readStringCollectionAsImmutableList());
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java
index 32cd9b8b74462..6414e0d2f1779 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java
@@ -502,19 +502,19 @@ public static SnapshotInfo readFrom(final StreamInput in) throws IOException {
         } else {
             snapshot = new Snapshot(UNKNOWN_REPO_NAME, new SnapshotId(in));
         }
-        final List<String> indices = in.readImmutableStringList();
+        final List<String> indices = in.readStringCollectionAsImmutableList();
         final SnapshotState state = in.readBoolean() ? SnapshotState.fromValue(in.readByte()) : null;
         final String reason = in.readOptionalString();
         final long startTime = in.readVLong();
         final long endTime = in.readVLong();
         final int totalShards = in.readVInt();
         final int successfulShards = in.readVInt();
-        final List<SnapshotShardFailure> shardFailures = in.readImmutableList(SnapshotShardFailure::new);
+        final List<SnapshotShardFailure> shardFailures = in.readCollectionAsImmutableList(SnapshotShardFailure::new);
         final IndexVersion version = in.readBoolean() ? IndexVersion.readVersion(in) : null;
         final Boolean includeGlobalState = in.readOptionalBoolean();
         final Map<String, Object> userMetadata = in.readMap();
-        final List<String> dataStreams = in.readImmutableStringList();
-        final List<SnapshotFeatureInfo> featureStates = in.readImmutableList(SnapshotFeatureInfo::new);
+        final List<String> dataStreams = in.readStringCollectionAsImmutableList();
+        final List<SnapshotFeatureInfo> featureStates = in.readCollectionAsImmutableList(SnapshotFeatureInfo::new);
         final Map<String, IndexSnapshotDetails> indexSnapshotDetails = in.readImmutableMap(IndexSnapshotDetails::new);
         return new SnapshotInfo(
             snapshot,
@@ -1032,7 +1032,7 @@ public void writeTo(final StreamOutput out) throws IOException {
         out.writeVLong(endTime);
         out.writeVInt(totalShards);
         out.writeVInt(successfulShards);
-        out.writeList(shardFailures);
+        out.writeCollection(shardFailures);
         if (version != null) {
             out.writeBoolean(true);
             IndexVersion.writeVersion(version, out);
@@ -1042,9 +1042,9 @@ public void writeTo(final StreamOutput out) throws IOException {
         out.writeOptionalBoolean(includeGlobalState);
         out.writeGenericMap(userMetadata);
         out.writeStringCollection(dataStreams);
-        out.writeList(featureStates);
+        out.writeCollection(featureStates);
 
-        out.writeMap(indexSnapshotDetails, StreamOutput::writeString, (stream, value) -> value.writeTo(stream));
+        out.writeMap(indexSnapshotDetails, StreamOutput::writeWriteable);
     }
 
     private static SnapshotState snapshotState(final String reason, final List<SnapshotShardFailure> shardFailures) {
diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
index e4dd9e6310c88..5fc4353a68230 100644
--- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
+++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java
@@ -13,7 +13,7 @@
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.TransportVersion;
-import org.elasticsearch.Version;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.ActionRunnable;
@@ -134,7 +134,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
     public static final IndexVersion INDEX_GEN_IN_REPO_DATA_VERSION = IndexVersion.V_7_9_0;
     public static final IndexVersion UUIDS_IN_REPO_DATA_VERSION = IndexVersion.V_7_12_0;
-    public static final TransportVersion UUIDS_IN_REPO_DATA_TRANSPORT_VERSION = TransportVersion.V_7_12_0;
+    public static final TransportVersion UUIDS_IN_REPO_DATA_TRANSPORT_VERSION = TransportVersions.V_7_12_0;
 
     public static final IndexVersion FILE_INFO_WRITER_UUIDS_IN_SHARD_DATA_VERSION = IndexVersion.V_7_16_0;
 
@@ -2115,8 +2115,9 @@ private void addDeleteListener(String deleteUUID, ActionListener<Void> listener)
     }
 
     /**
-     * Determines the minimum {@link Version} that the snapshot repository must be compatible with from the current nodes in the cluster
-     * and the contents of the repository. The minimum version is determined as the lowest version found across all snapshots in the
+     * Determines the minimum {@link IndexVersion} that the snapshot repository must be compatible with
+     * from the current nodes in the cluster and the contents of the repository.
+     * The minimum version is determined as the lowest version found across all snapshots in the
      * repository and all nodes in the cluster.
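The `RestoreService` and `SnapshotsService` hunks above replace the release-wide `Version` with `IndexVersion` when deciding snapshot compatibility. A hedged sketch of the node-side check, simplified to a plain exception; the message mirrors the hunk and the wrapper class is illustrative:

```java
import org.elasticsearch.index.IndexVersion;
import org.elasticsearch.snapshots.SnapshotInfo;

class RestoreCompatExample {
    static void ensureRestorable(SnapshotInfo snapshotInfo) {
        // a snapshot written with a newer index format cannot be read by this node
        if (IndexVersion.current().before(snapshotInfo.version())) {
            throw new IllegalStateException(
                "the snapshot was created with index version ["
                    + snapshotInfo.version()
                    + "] which is higher than the version used by this node ["
                    + IndexVersion.current()
                    + "]"
            );
        }
    }
}
```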
      *
      * @param minNodeVersion minimum node version in the cluster
diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskCancellationService.java b/server/src/main/java/org/elasticsearch/tasks/TaskCancellationService.java
index 3872a2f017a88..39a76f3508daa 100644
--- a/server/src/main/java/org/elasticsearch/tasks/TaskCancellationService.java
+++ b/server/src/main/java/org/elasticsearch/tasks/TaskCancellationService.java
@@ -13,6 +13,7 @@
 import org.elasticsearch.ElasticsearchSecurityException;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ResultDeduplicator;
 import org.elasticsearch.action.support.ChannelActionListener;
@@ -47,7 +48,7 @@ public class TaskCancellationService {
     public static final String BAN_PARENT_ACTION_NAME = "internal:admin/tasks/ban";
     public static final String CANCEL_CHILD_ACTION_NAME = "internal:admin/tasks/cancel_child";
-    public static final TransportVersion VERSION_SUPPORTING_CANCEL_CHILD_ACTION = TransportVersion.V_8_8_0;
+    public static final TransportVersion VERSION_SUPPORTING_CANCEL_CHILD_ACTION = TransportVersions.V_8_8_0;
     private static final Logger logger = LogManager.getLogger(TaskCancellationService.class);
     private final TransportService transportService;
     private final TaskManager taskManager;
@@ -317,7 +318,7 @@ private BanParentTaskRequest(StreamInput in) throws IOException {
             parentTaskId = TaskId.readFromStream(in);
             ban = in.readBoolean();
             reason = ban ? in.readString() : null;
-            if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_8_0)) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_8_0)) {
                 waitForCompletion = in.readBoolean();
             } else {
                 waitForCompletion = false;
@@ -332,7 +333,7 @@ public void writeTo(StreamOutput out) throws IOException {
             if (ban) {
                 out.writeString(reason);
             }
-            if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_8_0)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_8_0)) {
                 out.writeBoolean(waitForCompletion);
             }
         }
diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java
index d30a6e612daa1..e0ef4feb0ae35 100644
--- a/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java
+++ b/server/src/main/java/org/elasticsearch/tasks/TaskInfo.java
@@ -8,7 +8,7 @@
 
 package org.elasticsearch.tasks;
 
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -67,7 +67,7 @@ public static TaskInfo from(StreamInput in) throws IOException {
         return new TaskInfo(
             taskId,
             in.readString(),
-            in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_055) ? in.readString() : taskId.getNodeId(),
+            in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_055) ? in.readString() : taskId.getNodeId(),
             in.readString(),
             in.readOptionalString(),
             in.readOptionalNamedWriteable(Task.Status.class),
@@ -84,7 +84,7 @@ public static TaskInfo from(StreamInput in) throws IOException {
     public void writeTo(StreamOutput out) throws IOException {
         taskId.writeTo(out);
         out.writeString(type);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_055)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_055)) {
             out.writeString(node);
         }
         out.writeString(action);
@@ -95,7 +95,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeBoolean(cancellable);
         out.writeBoolean(cancelled);
         parentTaskId.writeTo(out);
-        out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString);
+        out.writeMap(headers, StreamOutput::writeString);
     }
 
     public long id() {
diff --git a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java
index 1e42417aa2471..b0da930cd17b6 100644
--- a/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java
+++ b/server/src/main/java/org/elasticsearch/tasks/TaskResultsService.java
@@ -21,6 +21,7 @@
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.indices.SystemIndexDescriptor;
@@ -103,7 +104,7 @@ public void onFailure(Exception e) {
                 } else {
                     TimeValue wait = backoff.next();
                     logger.warn(() -> "failed to store task result, retrying in [" + wait + "]", e);
-                    threadPool.schedule(() -> doStoreResult(backoff, index, listener), wait, ThreadPool.Names.SAME);
+                    threadPool.schedule(() -> doStoreResult(backoff, index, listener), wait, EsExecutors.DIRECT_EXECUTOR_SERVICE);
                 }
             }
         });
diff --git a/server/src/main/java/org/elasticsearch/threadpool/ScheduledExecutorServiceScheduler.java b/server/src/main/java/org/elasticsearch/threadpool/ScheduledExecutorServiceScheduler.java
new file mode 100644
index 0000000000000..cc7aad76cf6b7
--- /dev/null
+++ b/server/src/main/java/org/elasticsearch/threadpool/ScheduledExecutorServiceScheduler.java
@@ -0,0 +1,34 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.threadpool;
+
+import org.elasticsearch.common.util.concurrent.EsExecutors;
+import org.elasticsearch.core.TimeValue;
+
+import java.util.concurrent.Executor;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A {@link Scheduler} which wraps a {@link ScheduledExecutorService}. It always runs the delayed command on the scheduler thread, so the
+ * provided {@link Executor} must always be {@link EsExecutors#DIRECT_EXECUTOR_SERVICE}.
+ */
+public final class ScheduledExecutorServiceScheduler implements Scheduler {
+
+    private final ScheduledExecutorService executor;
+
+    public ScheduledExecutorServiceScheduler(ScheduledExecutorService executor) {
+        this.executor = executor;
+    }
+
+    @Override
+    public ScheduledCancellable schedule(Runnable command, TimeValue delay, Executor unused) {
+        assert unused == EsExecutors.DIRECT_EXECUTOR_SERVICE : "ScheduledExecutorServiceScheduler never forks, don't even try";
+        return Scheduler.wrapAsScheduledCancellable(executor.schedule(command, delay.millis(), TimeUnit.MILLISECONDS));
+    }
+}
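A possible usage of the new `ScheduledExecutorServiceScheduler` added above, assuming a caller that owns its own `ScheduledExecutorService`; per the javadoc, only the direct executor may be passed, and the wrapper asserts this:

```java
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.threadpool.ScheduledExecutorServiceScheduler;
import org.elasticsearch.threadpool.Scheduler;

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;

public class SchedulerExample {
    public static void main(String[] args) {
        ScheduledExecutorService ses = Executors.newSingleThreadScheduledExecutor();
        Scheduler scheduler = new ScheduledExecutorServiceScheduler(ses);
        // the command runs on the scheduler thread itself after the delay
        scheduler.schedule(
            () -> System.out.println("runs on the scheduler thread"),
            TimeValue.timeValueSeconds(1),
            EsExecutors.DIRECT_EXECUTOR_SERVICE
        );
    }
}
```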
diff --git a/server/src/main/java/org/elasticsearch/threadpool/Scheduler.java b/server/src/main/java/org/elasticsearch/threadpool/Scheduler.java
index 4922d75d90597..438a32a2b8630 100644
--- a/server/src/main/java/org/elasticsearch/threadpool/Scheduler.java
+++ b/server/src/main/java/org/elasticsearch/threadpool/Scheduler.java
@@ -18,6 +18,7 @@
 import org.elasticsearch.core.TimeValue;
 
 import java.util.concurrent.Delayed;
+import java.util.concurrent.Executor;
 import java.util.concurrent.Future;
 import java.util.concurrent.RejectedExecutionHandler;
 import java.util.concurrent.RunnableFuture;
@@ -80,19 +81,19 @@ static boolean awaitTermination(
 
     /**
      * Schedules a one-shot command to be run after a given delay. The command is run in the context of the calling thread.
-     * The command runs on scheduler thread. Do not run blocking calls on the scheduler thread. Subclasses may allow
-     * to execute on a different executor, in which case blocking calls are allowed.
+     * Implementations may choose to run the command on the given {@code executor} or on the scheduler thread. If {@code executor} is {@link
+     * EsExecutors#DIRECT_EXECUTOR_SERVICE} then the command runs on the scheduler thread in all cases. Do not run blocking calls on the
+     * scheduler thread.
      *
      * @param command the command to run
      * @param delay delay before the task executes
-     * @param executor the name of the executor that has to execute this task. Ignored in the default implementation but can be used
-     *                 by subclasses that support multiple executors.
-     * @return a ScheduledFuture who's get will return when the task has been added to its target thread pool and throws an exception if
-     *         the task is canceled before it was added to its target thread pool. Once the task has been added to its target thread pool
-     *         the ScheduledFuture cannot interact with it.
+     * @param executor the executor that has to execute this task.
+     * @return a ScheduledFuture whose {@link ScheduledFuture#get()} will return when the task has been added to its target thread pool and
+     *         throws an exception if the task is canceled before it was added to its target thread pool. Once the task has been added to
+     *         its target thread pool the ScheduledFuture cannot interact with it.
      * @throws EsRejectedExecutionException if the task cannot be scheduled for execution
      */
-    ScheduledCancellable schedule(Runnable command, TimeValue delay, String executor);
+    ScheduledCancellable schedule(Runnable command, TimeValue delay, Executor executor);
 
     /**
      * Schedules a periodic action that runs on scheduler thread. Do not run blocking calls on the scheduler thread. Subclasses may allow
      *
@@ -105,7 +106,7 @@ static boolean awaitTermination(
      * @return a {@link Cancellable} that can be used to cancel the subsequent runs of the command. If the command is running, it will
      *         not be interrupted.
      */
-    default Cancellable scheduleWithFixedDelay(Runnable command, TimeValue interval, String executor) {
+    default Cancellable scheduleWithFixedDelay(Runnable command, TimeValue interval, Executor executor) {
         var runnable = new ReschedulingRunnable(command, interval, executor, this, (e) -> {}, (e) -> {});
         runnable.start();
         return runnable;
@@ -165,7 +166,7 @@ final class ReschedulingRunnable extends AbstractRunnable implements Cancellable
 
         private final Runnable runnable;
         private final TimeValue interval;
-        private final String executor;
+        private final Executor executor;
         private final Scheduler scheduler;
         private final Consumer<Exception> rejectionConsumer;
         private final Consumer<Exception> failureConsumer;
@@ -183,7 +184,7 @@ final class ReschedulingRunnable extends AbstractRunnable implements Cancellable
         ReschedulingRunnable(
             Runnable runnable,
             TimeValue interval,
-            String executor,
+            Executor executor,
             Scheduler scheduler,
             Consumer<Exception> rejectionConsumer,
             Consumer<Exception> failureConsumer
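The `Scheduler` interface above now takes a concrete `Executor` instead of a thread-pool name string, with `EsExecutors.DIRECT_EXECUTOR_SERVICE` replacing `ThreadPool.Names.SAME` (as in the `TaskResultsService` hunk earlier). A hedged sketch of the two call styles against `ThreadPool`; the task and delays are illustrative:

```java
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.threadpool.ThreadPool;

class ScheduleCallStyles {
    static void scheduleExamples(ThreadPool threadPool, Runnable task) {
        // run on the scheduler thread itself (previously the name ThreadPool.Names.SAME)
        threadPool.schedule(task, TimeValue.timeValueMillis(100), EsExecutors.DIRECT_EXECUTOR_SERVICE);
        // fork onto a pooled executor (previously just the name string was passed)
        threadPool.schedule(task, TimeValue.timeValueSeconds(30), threadPool.executor(ThreadPool.Names.GENERIC));
    }
}
```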
diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
index eafa676bb7293..bfcd8c8a396f5 100644
--- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
+++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPool.java
@@ -467,11 +467,11 @@ public ExecutorService executor(String name) {
      * @throws org.elasticsearch.common.util.concurrent.EsRejectedExecutionException if the task cannot be scheduled for execution
      */
     @Override
-    public ScheduledCancellable schedule(Runnable command, TimeValue delay, String executor) {
+    public ScheduledCancellable schedule(Runnable command, TimeValue delay, Executor executor) {
         final Runnable contextPreservingRunnable = threadContext.preserveContext(command);
         final Runnable toSchedule;
-        if (Names.SAME.equals(executor) == false) {
-            toSchedule = new ThreadedRunnable(contextPreservingRunnable, executor(executor));
+        if (executor != EsExecutors.DIRECT_EXECUTOR_SERVICE) {
+            toSchedule = new ThreadedRunnable(contextPreservingRunnable, executor);
         } else if (slowSchedulerWarnThresholdNanos > 0) {
             toSchedule = new Runnable() {
                 @Override
@@ -503,7 +503,7 @@ public String toString() {
         return new ScheduledCancellableAdapter(scheduler.schedule(toSchedule, delay.millis(), TimeUnit.MILLISECONDS));
     }
 
-    public void scheduleUnlessShuttingDown(TimeValue delay, String executor, Runnable command) {
+    public void scheduleUnlessShuttingDown(TimeValue delay, Executor executor, Runnable command) {
         try {
             schedule(command, delay, executor);
         } catch (EsRejectedExecutionException e) {
@@ -523,8 +523,7 @@ public void scheduleUnlessShuttingDown(TimeValue delay, String executor, Runnabl
         }
     }
 
-    @Override
-    public Cancellable scheduleWithFixedDelay(Runnable command, TimeValue interval, String executor) {
+    public Cancellable scheduleWithFixedDelay(Runnable command, TimeValue interval, Executor executor) {
         var runnable = new ReschedulingRunnable(command, interval, executor, this, (e) -> {
             if (logger.isDebugEnabled()) {
                 logger.debug(() -> format("scheduled task [%s] was rejected on thread pool [%s]", command, executor), e);
diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPoolInfo.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPoolInfo.java
index 6e809aec0705a..2a7a29d9fc5e0 100644
--- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPoolInfo.java
+++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPoolInfo.java
@@ -26,12 +26,12 @@ public ThreadPoolInfo(List<ThreadPool.Info> infos) {
     }
 
     public ThreadPoolInfo(StreamInput in) throws IOException {
-        this.infos = in.readImmutableList(ThreadPool.Info::new);
+        this.infos = in.readCollectionAsImmutableList(ThreadPool.Info::new);
     }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeList(infos);
+        out.writeCollection(infos);
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java b/server/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java
index 2457f68288698..6b4fab73f3ee0 100644
--- a/server/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java
+++ b/server/src/main/java/org/elasticsearch/threadpool/ThreadPoolStats.java
@@ -136,7 +136,7 @@ public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params outerP
     }
 
     public ThreadPoolStats(StreamInput in) throws IOException {
-        this(in.readList(Stats::new));
+        this(in.readCollectionAsList(Stats::new));
     }
 
     @Override
diff --git a/server/src/main/java/org/elasticsearch/transport/ActionTransportException.java b/server/src/main/java/org/elasticsearch/transport/ActionTransportException.java
index f04089ade2d92..bba4a2a6b86b7 100644
--- a/server/src/main/java/org/elasticsearch/transport/ActionTransportException.java
+++ b/server/src/main/java/org/elasticsearch/transport/ActionTransportException.java
@@ -8,7 +8,7 @@
 
 package org.elasticsearch.transport;
 
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.network.NetworkAddress;
@@ -24,7 +24,7 @@ public class ActionTransportException extends TransportException {
 
     public ActionTransportException(StreamInput in) throws IOException {
         super(in);
-        if (in.getTransportVersion().before(TransportVersion.V_8_1_0)) {
+        if (in.getTransportVersion().before(TransportVersions.V_8_1_0)) {
             in.readOptionalWriteable(TransportAddress::new);
             in.readOptionalString();
         }
@@ -45,7 +45,7 @@ public ActionTransportException(String name, InetSocketAddress address, String a
     @Override
     protected void writeTo(StreamOutput out, Writer<Throwable> nestedExceptionsWriter) throws IOException {
         super.writeTo(out, nestedExceptionsWriter);
-        if (out.getTransportVersion().before(TransportVersion.V_8_1_0)) {
+        if (out.getTransportVersion().before(TransportVersions.V_8_1_0)) {
             out.writeMissingWriteable(TransportAddress.class);
             out.writeMissingString(); // action
         }
diff --git a/server/src/main/java/org/elasticsearch/transport/Compression.java b/server/src/main/java/org/elasticsearch/transport/Compression.java
index a0290a8c92af6..6dc6115a3565e 100644
--- a/server/src/main/java/org/elasticsearch/transport/Compression.java
+++ b/server/src/main/java/org/elasticsearch/transport/Compression.java
@@ -13,6 +13,7 @@
 import net.jpountz.lz4.LZ4FastDecompressor;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.core.Booleans;
 import org.elasticsearch.lz4.ESLZ4Compressor;
@@ -27,7 +28,7 @@ public enum Scheme {
         LZ4,
         DEFLATE;
 
-        static final TransportVersion LZ4_VERSION = TransportVersion.V_7_14_0;
+        static final TransportVersion LZ4_VERSION = TransportVersions.V_7_14_0;
         static final int HEADER_LENGTH = 4;
         private static final byte[] DEFLATE_HEADER = new byte[] { 'D', 'F', 'L', '\0' };
         private static final byte[] LZ4_HEADER = new byte[] { 'L', 'Z', '4', '\0' };
diff --git a/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java
b/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java index 1fa5fc61f07a4..ec7ef64e56ed2 100644 --- a/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java +++ b/server/src/main/java/org/elasticsearch/transport/ConnectTransportException.java @@ -8,7 +8,7 @@ package org.elasticsearch.transport; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -35,7 +35,7 @@ public ConnectTransportException(DiscoveryNode node, String msg, String action, public ConnectTransportException(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().before(TransportVersion.V_8_1_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_1_0)) { in.readOptionalWriteable(DiscoveryNode::new); } } @@ -43,7 +43,7 @@ public ConnectTransportException(StreamInput in) throws IOException { @Override protected void writeTo(StreamOutput out, Writer nestedExceptionsWriter) throws IOException { super.writeTo(out, nestedExceptionsWriter); - if (out.getTransportVersion().before(TransportVersion.V_8_1_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_1_0)) { out.writeMissingWriteable(DiscoveryNode.class); } } diff --git a/server/src/main/java/org/elasticsearch/transport/Header.java b/server/src/main/java/org/elasticsearch/transport/Header.java index 2c947e80e96d6..050d18e153bd8 100644 --- a/server/src/main/java/org/elasticsearch/transport/Header.java +++ b/server/src/main/java/org/elasticsearch/transport/Header.java @@ -9,6 +9,7 @@ package org.elasticsearch.transport; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Tuple; @@ -49,7 +50,7 @@ long getRequestId() { return requestId; } - boolean isRequest() { + public boolean isRequest() { return TransportStatus.isRequest(status); } @@ -61,7 +62,7 @@ boolean isError() { return TransportStatus.isError(status); } - boolean isHandshake() { + public boolean isHandshake() { return TransportStatus.isHandshake(status); } @@ -77,6 +78,11 @@ public Compression.Scheme getCompressionScheme() { return compressionScheme; } + public Map getRequestHeaders() { + var allHeaders = getHeaders(); + return allHeaders == null ? null : allHeaders.v1(); + } + boolean needsToReadVariableHeader() { return headers == null; } @@ -89,7 +95,7 @@ void finishParsingHeader(StreamInput input) throws IOException { this.headers = ThreadContext.readHeadersFromStream(input); if (isRequest()) { - if (version.before(TransportVersion.V_8_0_0)) { + if (version.before(TransportVersions.V_8_0_0)) { // discard features input.readStringArray(); } diff --git a/server/src/main/java/org/elasticsearch/transport/HeaderValidationException.java b/server/src/main/java/org/elasticsearch/transport/HeaderValidationException.java new file mode 100644 index 0000000000000..03cf76e31ec08 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/transport/HeaderValidationException.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.transport; + +/** + * This is used to pack the validation exception with the associated header. + */ +public class HeaderValidationException extends RuntimeException { + public final Header header; + public final Exception validationException; + + public HeaderValidationException(Header header, Exception validationException) { + this.header = header; + this.validationException = validationException; + } +} diff --git a/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java b/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java index b0298dd2ec477..056af07b13912 100644 --- a/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java +++ b/server/src/main/java/org/elasticsearch/transport/InboundDecoder.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.io.stream.StreamInput; @@ -35,14 +36,20 @@ public class InboundDecoder implements Releasable { private boolean isCompressed = false; private boolean isClosed = false; private final ByteSizeValue maxHeaderSize; + private final ChannelType channelType; public InboundDecoder(Recycler recycler) { - this(recycler, new ByteSizeValue(2, ByteSizeUnit.GB)); + this(recycler, new ByteSizeValue(2, ByteSizeUnit.GB), ChannelType.MIX); } - public InboundDecoder(Recycler recycler, ByteSizeValue maxHeaderSize) { + public InboundDecoder(Recycler recycler, ChannelType channelType) { + this(recycler, new ByteSizeValue(2, ByteSizeUnit.GB), channelType); + } + + public InboundDecoder(Recycler recycler, ByteSizeValue maxHeaderSize, ChannelType channelType) { this.recycler = recycler; this.maxHeaderSize = maxHeaderSize; + this.channelType = channelType; } public int decode(ReleasableBytesReference reference, Consumer fragmentConsumer) throws IOException { @@ -70,7 +77,7 @@ public int internalDecode(ReleasableBytesReference reference, Consumer f } else { totalNetworkSize = messageLength + TcpHeader.BYTES_REQUIRED_FOR_MESSAGE_SIZE; - Header header = readHeader(messageLength, reference); + Header header = readHeader(messageLength, reference, channelType); bytesConsumed += headerBytesToRead; if (header.isCompressed()) { isCompressed = true; @@ -186,7 +193,7 @@ private static int headerBytesToRead(BytesReference reference, ByteSizeValue max } } - private static Header readHeader(int networkMessageSize, BytesReference bytesReference) throws IOException { + private static Header readHeader(int networkMessageSize, BytesReference bytesReference, ChannelType channelType) throws IOException { try (StreamInput streamInput = bytesReference.streamInput()) { streamInput.skip(TcpHeader.BYTES_REQUIRED_FOR_MESSAGE_SIZE); long requestId = streamInput.readLong(); @@ -194,6 +201,11 @@ private static Header readHeader(int networkMessageSize, BytesReference bytesRef int remoteVersion = streamInput.readInt(); Header header = new Header(networkMessageSize, requestId, status, TransportVersion.fromId(remoteVersion)); + if (channelType == ChannelType.SERVER && header.isResponse()) { + throw new IllegalArgumentException("server channels do not accept 
inbound responses, only requests, closing channel"); + } else if (channelType == ChannelType.CLIENT && header.isRequest()) { + throw new IllegalArgumentException("client channels do not accept inbound requests, only responses, closing channel"); + } if (header.isHandshake()) { checkHandshakeVersionCompatibility(header.getVersion()); } else { @@ -236,9 +248,15 @@ static void checkVersionCompatibility(TransportVersion remoteVersion) { "Received message from unsupported version: [" + remoteVersion + "] minimal compatible version is: [" - + TransportVersion.MINIMUM_COMPATIBLE + + TransportVersions.MINIMUM_COMPATIBLE + "]" ); } } + + public enum ChannelType { + SERVER, + CLIENT, + MIX + } } diff --git a/server/src/main/java/org/elasticsearch/transport/InboundHandler.java b/server/src/main/java/org/elasticsearch/transport/InboundHandler.java index 2080f89d25ccc..171f693905b61 100644 --- a/server/src/main/java/org/elasticsearch/transport/InboundHandler.java +++ b/server/src/main/java/org/elasticsearch/transport/InboundHandler.java @@ -295,7 +295,7 @@ private void handleRequestForking(T request, Reques boolean success = false; request.incRef(); try { - threadPool.executor(reg.getExecutor()).execute(threadPool.getThreadContext().preserveContextWithTracing(new AbstractRunnable() { + reg.getExecutor().execute(threadPool.getThreadContext().preserveContextWithTracing(new AbstractRunnable() { @Override protected void doRun() { doHandleRequest(reg, request, channel); diff --git a/server/src/main/java/org/elasticsearch/transport/InboundPipeline.java b/server/src/main/java/org/elasticsearch/transport/InboundPipeline.java index 085c25ef08929..b5b3baaebd1ca 100644 --- a/server/src/main/java/org/elasticsearch/transport/InboundPipeline.java +++ b/server/src/main/java/org/elasticsearch/transport/InboundPipeline.java @@ -109,8 +109,7 @@ public void doHandleBytes(TcpChannel channel, ReleasableBytesReference reference private void forwardFragments(TcpChannel channel, ArrayList fragments) throws IOException { for (Object fragment : fragments) { if (fragment instanceof Header) { - assert aggregator.isAggregating() == false; - aggregator.headerReceived((Header) fragment); + headerReceived((Header) fragment); } else if (fragment instanceof Compression.Scheme) { assert aggregator.isAggregating(); aggregator.updateCompressionScheme((Compression.Scheme) fragment); @@ -134,6 +133,11 @@ private void forwardFragments(TcpChannel channel, ArrayList fragments) t } } + protected void headerReceived(Header header) { + assert aggregator.isAggregating() == false; + aggregator.headerReceived(header); + } + private static boolean endOfMessage(Object fragment) { return fragment == InboundDecoder.PING || fragment == InboundDecoder.END_CONTENT || fragment instanceof Exception; } diff --git a/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java index 492aa82a8eab2..07fd2a1197c15 100644 --- a/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java +++ b/server/src/main/java/org/elasticsearch/transport/OutboundMessage.java @@ -8,6 +8,7 @@ package org.elasticsearch.transport; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -128,7 +129,7 @@ static class Request extends OutboundMessage { @Override protected void writeVariableHeader(StreamOutput stream) 
throws IOException { super.writeVariableHeader(stream); - if (version.before(TransportVersion.V_8_0_0)) { + if (version.before(TransportVersions.V_8_0_0)) { // empty features array stream.writeStringArray(Strings.EMPTY_ARRAY); } diff --git a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java index b1758715998d0..83a0860ba6324 100644 --- a/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/ProxyConnectionStrategy.java @@ -8,7 +8,7 @@ package org.elasticsearch.transport; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterName; @@ -359,7 +359,7 @@ public ProxyModeInfo(String address, String serverName, int maxSocketConnections private ProxyModeInfo(StreamInput input) throws IOException { address = input.readString(); - if (input.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) { + if (input.getTransportVersion().onOrAfter(TransportVersions.V_7_7_0)) { serverName = input.readString(); } else { serverName = null; @@ -380,7 +380,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(address); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_7_0)) { out.writeString(serverName); } out.writeVInt(maxSocketConnections); diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java index a8a4925441709..df0bb0174e542 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterAware.java @@ -21,10 +21,8 @@ import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Set; import java.util.stream.Collectors; -import java.util.stream.Stream; /** * Base class for all services and components that need up-to-date information about the registered remote clusters @@ -149,7 +147,7 @@ void validateAndUpdateRemoteCluster(String clusterAlias, Settings settings) { * Registers this instance to listen to updates on the cluster settings. 
*/ public void listenForUpdates(ClusterSettings clusterSettings) { - List> remoteClusterSettings = Stream.of( + List> remoteClusterSettings = List.of( RemoteClusterService.REMOTE_CLUSTER_COMPRESS, RemoteClusterService.REMOTE_CLUSTER_PING_SCHEDULE, RemoteConnectionStrategy.REMOTE_CONNECTION_MODE, @@ -159,7 +157,7 @@ public void listenForUpdates(ClusterSettings clusterSettings) { ProxyConnectionStrategy.PROXY_ADDRESS, ProxyConnectionStrategy.REMOTE_SOCKET_CONNECTIONS, ProxyConnectionStrategy.SERVER_NAME - ).filter(Objects::nonNull).collect(Collectors.toList()); + ); clusterSettings.addAffixGroupUpdateConsumer(remoteClusterSettings, this::validateAndUpdateRemoteCluster); } diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java index 3c69312f4da06..814b17bac95ef 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterPortSettings.java @@ -9,6 +9,7 @@ package org.elasticsearch.transport; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; @@ -38,7 +39,7 @@ */ public class RemoteClusterPortSettings { - public static final TransportVersion TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY = TransportVersion.V_8_500_059; + public static final TransportVersion TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY = TransportVersions.V_8_500_059; public static final String REMOTE_CLUSTER_PROFILE = "_remote_cluster"; public static final String REMOTE_CLUSTER_PREFIX = "remote_cluster."; diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java index 4a3ea5b61e51c..9542d4b366ded 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteClusterService.java @@ -309,13 +309,13 @@ protected void updateRemoteCluster(String clusterAlias, Settings settings) { CountDownLatch latch = new CountDownLatch(1); updateRemoteCluster(clusterAlias, settings, ActionListener.runAfter(new ActionListener<>() { @Override - public void onResponse(Void o) { - logger.debug("connected to new remote cluster [{}]", clusterAlias); + public void onResponse(RemoteClusterConnectionStatus status) { + logger.info("remote cluster connection [{}] updated: {}", clusterAlias, status); } @Override public void onFailure(Exception e) { - logger.debug(() -> "connection to new remote cluster [" + clusterAlias + "] failed", e); + logger.warn(() -> "failed to update remote cluster connection [" + clusterAlias + "]", e); } }, latch::countDown)); @@ -324,7 +324,7 @@ public void onFailure(Exception e) { // are on the cluster state thread and our custom future implementation will throw an // assertion. 
if (latch.await(10, TimeUnit.SECONDS) == false) { - logger.warn("failed to connect to new remote cluster [{}] within {}", clusterAlias, TimeValue.timeValueSeconds(10)); + logger.warn("failed to update remote cluster connection [{}] within {}", clusterAlias, TimeValue.timeValueSeconds(10)); } } catch (InterruptedException e) { Thread.currentThread().interrupt(); @@ -338,7 +338,11 @@ public void onFailure(Exception e) { * @param newSettings the updated settings for the remote connection * @param listener a listener invoked once every configured cluster has been connected to */ - synchronized void updateRemoteCluster(String clusterAlias, Settings newSettings, ActionListener listener) { + synchronized void updateRemoteCluster( + String clusterAlias, + Settings newSettings, + ActionListener listener + ) { if (LOCAL_CLUSTER_GROUP_KEY.equals(clusterAlias)) { throw new IllegalArgumentException("remote clusters must not have the empty string as its key"); } @@ -351,7 +355,7 @@ synchronized void updateRemoteCluster(String clusterAlias, Settings newSettings, logger.warn("failed to close remote cluster connections for cluster: " + clusterAlias, e); } remoteClusters.remove(clusterAlias); - listener.onResponse(null); + listener.onResponse(RemoteClusterConnectionStatus.DISCONNECTED); return; } @@ -365,7 +369,7 @@ synchronized void updateRemoteCluster(String clusterAlias, Settings newSettings, credentialsProtectedRemoteClusters.contains(clusterAlias) ); remoteClusters.put(clusterAlias, remote); - remote.ensureConnected(listener); + remote.ensureConnected(listener.map(ignored -> RemoteClusterConnectionStatus.CONNECTED)); } else if (remote.shouldRebuildConnection(newSettings)) { // Changes to connection configuration. Must tear down existing connection try { @@ -382,13 +386,20 @@ synchronized void updateRemoteCluster(String clusterAlias, Settings newSettings, credentialsProtectedRemoteClusters.contains(clusterAlias) ); remoteClusters.put(clusterAlias, remote); - remote.ensureConnected(listener); + remote.ensureConnected(listener.map(ignored -> RemoteClusterConnectionStatus.RECONNECTED)); } else { // No changes to connection configuration. - listener.onResponse(null); + listener.onResponse(RemoteClusterConnectionStatus.UNCHANGED); } } + enum RemoteClusterConnectionStatus { + CONNECTED, + DISCONNECTED, + RECONNECTED, + UNCHANGED + } + /** * Connects to all remote clusters in a blocking fashion. This should be called on node startup to establish an initial connection * to all configured seed nodes. 
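(The updateRemoteCluster hunks above replace the Void-typed completion callback with one that reports how the connection changed. A minimal, self-contained sketch of that listener-mapping pattern follows; Listener and ensureConnected below are stand-ins for Elasticsearch's ActionListener and RemoteClusterConnection#ensureConnected, not the real API.)

import java.util.function.Function;

interface Listener<T> {
    void onResponse(T result);

    void onFailure(Exception e);

    // Adapt a Listener<R> into a Listener<T> by transforming each response,
    // mirroring what ActionListener#map does in the hunks above.
    static <T, R> Listener<T> map(Listener<R> delegate, Function<T, R> fn) {
        return new Listener<T>() {
            @Override
            public void onResponse(T result) {
                delegate.onResponse(fn.apply(result));
            }

            @Override
            public void onFailure(Exception e) {
                delegate.onFailure(e);
            }
        };
    }
}

enum RemoteClusterConnectionStatus { CONNECTED, DISCONNECTED, RECONNECTED, UNCHANGED }

class ListenerMapDemo {
    // Stand-in for RemoteClusterConnection#ensureConnected(ActionListener<Void>).
    static void ensureConnected(Listener<Void> listener) {
        listener.onResponse(null);
    }

    public static void main(String[] args) {
        Listener<RemoteClusterConnectionStatus> caller = new Listener<>() {
            @Override
            public void onResponse(RemoteClusterConnectionStatus status) {
                System.out.println("remote cluster connection updated: " + status);
            }

            @Override
            public void onFailure(Exception e) {
                e.printStackTrace();
            }
        };
        // As in the hunk above: the Void completion is mapped onto a status value.
        ensureConnected(Listener.map(caller, ignored -> RemoteClusterConnectionStatus.CONNECTED));
    }
}

(The same map call, in the opposite direction, is what lets initializeRemoteClusters in the next hunk keep its Void-typed CountDownActionListener.)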
@@ -404,7 +415,7 @@ void initializeRemoteClusters() { CountDownActionListener listener = new CountDownActionListener(enabledClusters.size(), future); for (String clusterAlias : enabledClusters) { - updateRemoteCluster(clusterAlias, settings, listener); + updateRemoteCluster(clusterAlias, settings, listener.map(ignored -> null)); } if (enabledClusters.isEmpty()) { diff --git a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java index 608ca2a001ac9..8e0b17b50fbaf 100644 --- a/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java +++ b/server/src/main/java/org/elasticsearch/transport/RemoteConnectionInfo.java @@ -8,7 +8,7 @@ package org.elasticsearch.transport; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -48,13 +48,13 @@ public RemoteConnectionInfo( } public RemoteConnectionInfo(StreamInput input) throws IOException { - if (input.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { + if (input.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) { RemoteConnectionStrategy.ConnectionStrategy mode = input.readEnum(RemoteConnectionStrategy.ConnectionStrategy.class); modeInfo = mode.getReader().read(input); initialConnectionTimeout = input.readTimeValue(); clusterAlias = input.readString(); skipUnavailable = input.readBoolean(); - if (input.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (input.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { hasClusterCredentials = input.readBoolean(); } else { hasClusterCredentials = false; @@ -89,14 +89,14 @@ public boolean hasClusterCredentials() { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) { out.writeEnum(modeInfo.modeType()); modeInfo.writeTo(out); out.writeTimeValue(initialConnectionTimeout); } else { if (modeInfo.modeType() == RemoteConnectionStrategy.ConnectionStrategy.SNIFF) { SniffConnectionStrategy.SniffModeInfo sniffInfo = (SniffConnectionStrategy.SniffModeInfo) this.modeInfo; - out.writeStringArray(sniffInfo.seedNodes.toArray(new String[0])); + out.writeStringCollection(sniffInfo.seedNodes); out.writeVInt(sniffInfo.maxConnectionsPerCluster); out.writeTimeValue(initialConnectionTimeout); out.writeVInt(sniffInfo.numNodesConnected); @@ -109,7 +109,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeString(clusterAlias); out.writeBoolean(skipUnavailable); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeBoolean(hasClusterCredentials); } } diff --git a/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java b/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java index 8dc67fb3896c5..09581fcc4f1e3 100644 --- a/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java +++ b/server/src/main/java/org/elasticsearch/transport/RequestHandlerRegistry.java @@ -18,6 +18,7 @@ import org.elasticsearch.tracing.Tracer; import java.io.IOException; +import java.util.concurrent.Executor; import static org.elasticsearch.core.Releasables.assertOnce; @@ 
-27,7 +28,7 @@ public class RequestHandlerRegistry implements private final TransportRequestHandler handler; private final boolean forceExecution; private final boolean canTripCircuitBreaker; - private final String executor; + private final Executor executor; private final TaskManager taskManager; private final Tracer tracer; private final Writeable.Reader requestReader; @@ -38,7 +39,7 @@ public RequestHandlerRegistry( Writeable.Reader requestReader, TaskManager taskManager, TransportRequestHandler handler, - String executor, + Executor executor, boolean forceExecution, boolean canTripCircuitBreaker, Tracer tracer @@ -86,7 +87,7 @@ public boolean canTripCircuitBreaker() { return canTripCircuitBreaker; } - public String getExecutor() { + public Executor getExecutor() { return executor; } diff --git a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java index 7f91ca9ee0edb..632828c8df901 100644 --- a/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java +++ b/server/src/main/java/org/elasticsearch/transport/SniffConnectionStrategy.java @@ -606,7 +606,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { - out.writeStringArray(seedNodes.toArray(new String[0])); + out.writeStringCollection(seedNodes); out.writeVInt(maxConnectionsPerCluster); out.writeVInt(numNodesConnected); } diff --git a/server/src/main/java/org/elasticsearch/transport/TcpHeader.java b/server/src/main/java/org/elasticsearch/transport/TcpHeader.java index b5020be7a9924..f50a6518a8001 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpHeader.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpHeader.java @@ -9,13 +9,14 @@ package org.elasticsearch.transport; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; public class TcpHeader { - public static final TransportVersion VERSION_WITH_HEADER_SIZE = TransportVersion.V_7_6_0; + public static final TransportVersion VERSION_WITH_HEADER_SIZE = TransportVersions.V_7_6_0; public static final int MARKER_BYTES_SIZE = 2; diff --git a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java index 72c5a7aae005b..811a4d8faaafb 100644 --- a/server/src/main/java/org/elasticsearch/transport/TcpTransport.java +++ b/server/src/main/java/org/elasticsearch/transport/TcpTransport.java @@ -416,7 +416,7 @@ private void initiateConnection(DiscoveryNode node, ConnectionProfile connection } TimeValue connectTimeout = connectionProfile.getConnectTimeout(); - threadPool.schedule(channelsConnectedListener::onTimeout, connectTimeout, ThreadPool.Names.GENERIC); + threadPool.schedule(channelsConnectedListener::onTimeout, connectTimeout, threadPool.generic()); } @Override @@ -753,6 +753,23 @@ static void handleException(TcpChannel channel, Exception e, Lifecycle lifecycle logger.warn(() -> format("%s, [%s], closing connection", e.getMessage(), channel)); } else if (e instanceof TransportNotReadyException) { logger.debug(() -> format("%s on [%s], closing connection", e.getMessage(), channel)); + } else if (e instanceof HeaderValidationException headerValidationException) { + Header header = headerValidationException.header; + if (channel.isOpen()) { + try { + 
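// Best-effort: return the validation failure to the client as a regular error response while the channel is still open; if sending fails, the IOException is logged below with the validation exception attached as suppressed. +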
outboundHandler.sendErrorResponse( + header.getVersion(), + channel, + header.getRequestId(), + header.getActionName(), + ResponseStatsConsumer.NONE, + headerValidationException.validationException + ); + } catch (IOException inner) { + inner.addSuppressed(headerValidationException.validationException); + logger.warn(() -> "Failed to send error message back to client for validation failure", inner); + } + } } else { logger.warn(() -> "exception caught on transport layer [" + channel + "], closing connection", e); } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java b/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java index 525bbb8f4af15..61b052c957ac1 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportHandshaker.java @@ -9,6 +9,7 @@ package org.elasticsearch.transport; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.bytes.BytesReference; @@ -115,7 +116,7 @@ final class TransportHandshaker { */ static final TransportVersion EARLIEST_HANDSHAKE_VERSION = TransportVersion.fromId(6080099); - static final TransportVersion REQUEST_HANDSHAKE_VERSION = TransportVersion.MINIMUM_COMPATIBLE; + static final TransportVersion REQUEST_HANDSHAKE_VERSION = TransportVersions.MINIMUM_COMPATIBLE; static final Set ALLOWED_HANDSHAKE_VERSIONS = Set.of(EARLIEST_HANDSHAKE_VERSION, REQUEST_HANDSHAKE_VERSION); static final String HANDSHAKE_ACTION_NAME = "internal:tcp/handshake"; @@ -159,7 +160,7 @@ void sendHandshake( threadPool.schedule( () -> handler.handleLocalException(new ConnectTransportException(node, "handshake_timeout[" + timeout + "]")), timeout, - ThreadPool.Names.GENERIC + threadPool.generic() ); success = true; } catch (Exception e) { @@ -240,7 +241,7 @@ public void handleResponse(HandshakeResponse response) { "Received message from unsupported version: [" + responseVersion + "] minimal compatible version is: [" - + TransportVersion.MINIMUM_COMPATIBLE + + TransportVersions.MINIMUM_COMPATIBLE + "]" ) ); diff --git a/server/src/main/java/org/elasticsearch/transport/TransportInterceptor.java b/server/src/main/java/org/elasticsearch/transport/TransportInterceptor.java index 4ecabaf09717e..e4a9dff24efc1 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportInterceptor.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportInterceptor.java @@ -11,19 +11,21 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.Writeable.Reader; +import java.util.concurrent.Executor; + /** * This interface allows plugins to intercept requests on both the sender and the receiver side. */ public interface TransportInterceptor { /** * This is called for each handler that is registered via - * {@link TransportService#registerRequestHandler(String, String, boolean, boolean, Reader, TransportRequestHandler)} or - * {@link TransportService#registerRequestHandler(String, String, Reader, TransportRequestHandler)}. The returned handler is + * {@link TransportService#registerRequestHandler(String, Executor, boolean, boolean, Reader, TransportRequestHandler)} or + * {@link TransportService#registerRequestHandler(String, Executor, Reader, TransportRequestHandler)}. The returned handler is * used instead of the passed in handler. 
By default the provided handler is returned. */ default TransportRequestHandler interceptHandler( String action, - String executor, + Executor executor, boolean forceExecution, TransportRequestHandler actualHandler ) { diff --git a/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java b/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java index 318b1f5b5e6fc..1caa83aa40488 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportKeepAlive.java @@ -140,7 +140,7 @@ private ScheduledPing(TimeValue pingInterval) { void ensureStarted() { if (isStarted.get() == false && isStarted.compareAndSet(false, true)) { - threadPool.schedule(this, pingInterval, ThreadPool.Names.GENERIC); + threadPool.schedule(this, pingInterval, threadPool.generic()); } } @@ -175,7 +175,7 @@ public void onAfter() { return; } - threadPool.scheduleUnlessShuttingDown(pingInterval, ThreadPool.Names.GENERIC, this); + threadPool.scheduleUnlessShuttingDown(pingInterval, threadPool.generic(), this); } @Override diff --git a/server/src/main/java/org/elasticsearch/transport/TransportLogger.java b/server/src/main/java/org/elasticsearch/transport/TransportLogger.java index 2d06d53b4dba0..be9e0070d05ba 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportLogger.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportLogger.java @@ -10,6 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.io.stream.InputStreamStreamInput; @@ -95,7 +96,7 @@ private static String format(TcpChannel channel, BytesReference message, String ThreadContext.readHeadersFromStream(streamInput); if (isRequest) { - if (version.before(TransportVersion.V_8_0_0)) { + if (version.before(TransportVersions.V_8_0_0)) { // discard features streamInput.readStringArray(); } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportService.java b/server/src/main/java/org/elasticsearch/transport/TransportService.java index a4f39a44f16a8..83fc21396e0f6 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportService.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportService.java @@ -79,14 +79,10 @@ public class TransportService extends AbstractLifecycleComponent * A feature flag enabling transport upgrades for serverless. 
*/ private static final String SERVERLESS_TRANSPORT_SYSTEM_PROPERTY = "es.serverless_transport"; - private static final boolean SERVERLESS_TRANSPORT_FEATURE_FLAG; - static { - final boolean serverlessFlag = Booleans.parseBoolean(System.getProperty(SERVERLESS_TRANSPORT_SYSTEM_PROPERTY), false); - if (serverlessFlag && Build.current().isSnapshot() == false) { - throw new IllegalArgumentException("Enabling serverless transport is only supported in snapshot builds"); - } - SERVERLESS_TRANSPORT_FEATURE_FLAG = serverlessFlag; - } + private static final boolean SERVERLESS_TRANSPORT_FEATURE_FLAG = Booleans.parseBoolean( + System.getProperty(SERVERLESS_TRANSPORT_SYSTEM_PROPERTY), + false + ); public static final String DIRECT_RESPONSE_PROFILE = ".direct"; public static final String HANDSHAKE_ACTION_NAME = "internal:transport/handshake"; @@ -293,7 +289,7 @@ public TransportService( } registerRequestHandler( HANDSHAKE_ACTION_NAME, - ThreadPool.Names.SAME, + EsExecutors.DIRECT_EXECUTOR_SERVICE, false, false, HandshakeRequest::new, @@ -1004,8 +1000,8 @@ private void sendLocalRequest(long requestId, final String action, final Transpo assert false : action; throw new ActionNotFoundTransportException("Action [" + action + "] not found"); } - final String executor = reg.getExecutor(); - if (ThreadPool.Names.SAME.equals(executor)) { + final Executor executor = reg.getExecutor(); + if (executor == EsExecutors.DIRECT_EXECUTOR_SERVICE) { try (var ignored = threadPool.getThreadContext().newTraceContext()) { try { reg.processMessageReceived(request, channel); @@ -1017,7 +1013,7 @@ private void sendLocalRequest(long requestId, final String action, final Transpo boolean success = false; request.incRef(); try { - threadPool.executor(executor).execute(threadPool.getThreadContext().preserveContextWithTracing(new AbstractRunnable() { + executor.execute(threadPool.getThreadContext().preserveContextWithTracing(new AbstractRunnable() { @Override protected void doRun() throws Exception { reg.processMessageReceived(request, channel); @@ -1122,6 +1118,46 @@ public static boolean isValidActionName(String actionName) { return false; } + /** + * Temporary passthrough function that continues to take a String rather than Executor type. + * + * @param action + * @param executor + * @param requestReader + * @param handler + * @param <Request> + */ + public <Request extends TransportRequest> void registerRequestHandler( + String action, + String executor, + Writeable.Reader<Request> requestReader, + TransportRequestHandler<Request> handler + ) { + registerRequestHandler(action, threadPool.executor(executor), requestReader, handler); + } + + /** + * Temporary passthrough function that continues to take a String rather than Executor type.
+ * + * @param action + * @param executor + * @param forceExecution + * @param canTripCircuitBreaker + * @param requestReader + * @param handler + * @param <Request> + */ + public <Request extends TransportRequest> void registerRequestHandler( + String action, + String executor, + boolean forceExecution, + boolean canTripCircuitBreaker, + Writeable.Reader<Request> requestReader, + TransportRequestHandler<Request> handler + ) { + registerRequestHandler(action, threadPool.executor(executor), forceExecution, canTripCircuitBreaker, requestReader, handler); + } + /** * Registers a new request handler * @@ -1132,7 +1168,7 @@ public static boolean isValidActionName(String actionName) { */ public <Request extends TransportRequest> void registerRequestHandler( String action, - String executor, + Executor executor, Writeable.Reader<Request> requestReader, TransportRequestHandler<Request> handler ) { @@ -1163,7 +1199,7 @@ public void registerRequestHandler( */ public <Request extends TransportRequest> void registerRequestHandler( String action, - String executor, + Executor executor, boolean forceExecution, boolean canTripCircuitBreaker, Writeable.Reader<Request> requestReader, TransportRequestHandler<Request> handler @@ -1379,7 +1415,7 @@ public String toString() { } private void scheduleTimeout(TimeValue timeout) { - this.cancellable = threadPool.schedule(this, timeout, ThreadPool.Names.GENERIC); + this.cancellable = threadPool.schedule(this, timeout, threadPool.generic()); } } diff --git a/server/src/main/java/org/elasticsearch/transport/TransportStats.java b/server/src/main/java/org/elasticsearch/transport/TransportStats.java index 85833efbbacd0..96c5a89256008 100644 --- a/server/src/main/java/org/elasticsearch/transport/TransportStats.java +++ b/server/src/main/java/org/elasticsearch/transport/TransportStats.java @@ -8,7 +8,7 @@ package org.elasticsearch.transport; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; @@ -69,7 +69,7 @@ public TransportStats(StreamInput in) throws IOException { rxSize = in.readVLong(); txCount = in.readVLong(); txSize = in.readVLong(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0) && in.readBoolean()) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0) && in.readBoolean()) { inboundHandlingTimeBucketFrequencies = new long[HandlingTimeTracker.BUCKET_COUNT]; for (int i = 0; i < inboundHandlingTimeBucketFrequencies.length; i++) { inboundHandlingTimeBucketFrequencies[i] = in.readVLong(); @@ -82,7 +82,7 @@ public TransportStats(StreamInput in) throws IOException { inboundHandlingTimeBucketFrequencies = new long[0]; outboundHandlingTimeBucketFrequencies = new long[0]; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { transportActionStats = Collections.unmodifiableMap(in.readOrderedMap(StreamInput::readString, TransportActionStats::new)); } else { transportActionStats = Map.of(); @@ -98,7 +98,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(rxCount); out.writeVLong(rxSize); out.writeVLong(txCount); out.writeVLong(txSize); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { assert (inboundHandlingTimeBucketFrequencies.length > 0) == (outboundHandlingTimeBucketFrequencies.length > 0); out.writeBoolean(inboundHandlingTimeBucketFrequencies.length > 0); for (long handlingTimeBucketFrequency : inboundHandlingTimeBucketFrequencies) { @@ -108,8 +108,8 @@ public void
writeTo(StreamOutput out) throws IOException { out.writeVLong(handlingTimeBucketFrequency); } } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { - out.writeMap(transportActionStats, StreamOutput::writeString, StreamOutput::writeWriteable); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { + out.writeMap(transportActionStats, StreamOutput::writeWriteable); } // else just drop these stats } diff --git a/server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java b/server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java index d6f31b6a89ef8..75ab5db982235 100644 --- a/server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java +++ b/server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java @@ -9,6 +9,7 @@ package org.elasticsearch.upgrades; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.NamedDiff; @@ -40,7 +41,7 @@ */ public class FeatureMigrationResults implements Metadata.Custom { public static final String TYPE = "system_index_migration"; - public static final TransportVersion MIGRATION_ADDED_VERSION = TransportVersion.V_8_0_0; + public static final TransportVersion MIGRATION_ADDED_VERSION = TransportVersions.V_8_0_0; private static final ParseField RESULTS_FIELD = new ParseField("results"); @@ -74,11 +75,7 @@ public FeatureMigrationResults(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap( - featureStatuses, - StreamOutput::writeString, - (StreamOutput outStream, SingleFeatureMigrationResult featureStatus) -> featureStatus.writeTo(outStream) - ); + out.writeMap(featureStatuses, StreamOutput::writeWriteable); } @Override diff --git a/server/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java b/server/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java index 8aa0a4b7af5de..67703919523ef 100644 --- a/server/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java +++ b/server/src/main/java/org/elasticsearch/watcher/ResourceWatcherService.java @@ -15,7 +15,6 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.threadpool.Scheduler.Cancellable; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.threadpool.ThreadPool.Names; import java.io.Closeable; import java.io.IOException; @@ -94,9 +93,10 @@ public ResourceWatcherService(Settings settings, ThreadPool threadPool) { interval = RELOAD_INTERVAL_HIGH.get(settings); highMonitor = new ResourceMonitor(interval, Frequency.HIGH); if (enabled) { - lowFuture = threadPool.scheduleWithFixedDelay(lowMonitor, lowMonitor.interval, Names.GENERIC); - mediumFuture = threadPool.scheduleWithFixedDelay(mediumMonitor, mediumMonitor.interval, Names.GENERIC); - highFuture = threadPool.scheduleWithFixedDelay(highMonitor, highMonitor.interval, Names.GENERIC); + final var executor = threadPool.generic(); + lowFuture = threadPool.scheduleWithFixedDelay(lowMonitor, lowMonitor.interval, executor); + mediumFuture = threadPool.scheduleWithFixedDelay(mediumMonitor, mediumMonitor.interval, executor); + highFuture = threadPool.scheduleWithFixedDelay(highMonitor, highMonitor.interval, executor); } else { lowFuture = null; mediumFuture = null; diff --git a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json 
b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json index c6dc459efa90b..ed1041acf3f54 100644 --- a/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json +++ b/server/src/main/resources/org/elasticsearch/common/reference-docs-links.json @@ -1,7 +1,7 @@ { "INITIAL_MASTER_NODES": "important-settings.html#initial_master_nodes", "DISCOVERY_TROUBLESHOOTING": "discovery-troubleshooting.html", - "UNSTABLE_CLUSTER_TROUBLESHOOTING": "cluster-fault-detection.html#cluster-fault-detection-troubleshooting", + "UNSTABLE_CLUSTER_TROUBLESHOOTING": "troubleshooting-unstable-cluster.html", "LAGGING_NODE_TROUBLESHOOTING": "cluster-fault-detection.html#_diagnosing_lagging_nodes", "SHARD_LOCK_TROUBLESHOOTING": "cluster-fault-detection.html#_diagnosing_shardlockobtainfailedexception_failures", "CONCURRENT_REPOSITORY_WRITERS": "add-repository.html", diff --git a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index c4560b1fe4bb1..3c313ab5cd29f 100644 --- a/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -355,7 +355,7 @@ public void testSearchContextMissingException() throws IOException { TransportVersion version = TransportVersionUtils.randomVersion(random()); SearchContextMissingException ex = serialize(new SearchContextMissingException(contextId), version); assertThat(ex.contextId().getId(), equalTo(contextId.getId())); - if (version.onOrAfter(TransportVersion.V_7_7_0)) { + if (version.onOrAfter(TransportVersions.V_7_7_0)) { assertThat(ex.contextId().getSessionId(), equalTo(contextId.getSessionId())); } else { assertThat(ex.contextId().getSessionId(), equalTo("")); @@ -365,7 +365,7 @@ public void testSearchContextMissingException() throws IOException { public void testCircuitBreakingException() throws IOException { CircuitBreakingException ex = serialize( new CircuitBreakingException("Too large", 0, 100, CircuitBreaker.Durability.TRANSIENT), - TransportVersion.V_7_0_0 + TransportVersions.V_7_0_0 ); assertEquals("Too large", ex.getMessage()); assertEquals(100, ex.getByteLimit()); diff --git a/server/src/test/java/org/elasticsearch/TransportVersionTests.java b/server/src/test/java/org/elasticsearch/TransportVersionTests.java index 32265e8549e0c..b8b8380ee4a96 100644 --- a/server/src/test/java/org/elasticsearch/TransportVersionTests.java +++ b/server/src/test/java/org/elasticsearch/TransportVersionTests.java @@ -28,8 +28,8 @@ public class TransportVersionTests extends ESTestCase { public void testVersionComparison() { - TransportVersion V_7_2_0 = TransportVersion.V_7_2_0; - TransportVersion V_8_0_0 = TransportVersion.V_8_0_0; + TransportVersion V_7_2_0 = TransportVersions.V_7_2_0; + TransportVersion V_8_0_0 = TransportVersions.V_8_0_0; assertThat(V_7_2_0.before(V_8_0_0), is(true)); assertThat(V_7_2_0.before(V_7_2_0), is(false)); assertThat(V_8_0_0.before(V_7_2_0), is(false)); @@ -66,7 +66,7 @@ public static class DuplicatedIdFakeVersion { public void testStaticTransportVersionChecks() { assertThat( - TransportVersion.getAllVersionIds(CorrectFakeVersion.class), + TransportVersions.getAllVersionIds(CorrectFakeVersion.class), equalTo( Map.of( 199, @@ -80,7 +80,7 @@ public void testStaticTransportVersionChecks() { ) ) ); - AssertionError e = expectThrows(AssertionError.class, () -> TransportVersion.getAllVersionIds(DuplicatedIdFakeVersion.class)); + AssertionError e = 
expectThrows(AssertionError.class, () -> TransportVersions.getAllVersionIds(DuplicatedIdFakeVersion.class)); assertThat(e.getMessage(), containsString("have the same version number")); } @@ -160,7 +160,7 @@ public void testMax() { } public void testVersionConstantPresent() { - Set ignore = Set.of(TransportVersion.ZERO, TransportVersion.current(), TransportVersion.MINIMUM_COMPATIBLE); + Set ignore = Set.of(TransportVersions.ZERO, TransportVersion.current(), TransportVersions.MINIMUM_COMPATIBLE); assertThat(TransportVersion.current(), sameInstance(TransportVersion.fromId(TransportVersion.current().id()))); final int iters = scaledRandomIntBetween(20, 100); for (int i = 0; i < iters; i++) { @@ -171,7 +171,7 @@ public void testVersionConstantPresent() { } public void testCURRENTIsLatest() { - assertThat(Collections.max(TransportVersion.getAllVersions()), is(TransportVersion.current())); + assertThat(Collections.max(TransportVersions.getAllVersions()), is(TransportVersion.current())); } public void testToString() { diff --git a/server/src/test/java/org/elasticsearch/VersionTests.java b/server/src/test/java/org/elasticsearch/VersionTests.java index 2e84465648650..99f78f4e8d023 100644 --- a/server/src/test/java/org/elasticsearch/VersionTests.java +++ b/server/src/test/java/org/elasticsearch/VersionTests.java @@ -8,10 +8,8 @@ package org.elasticsearch; -import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.VersionUtils; -import org.hamcrest.Matchers; import java.lang.reflect.Modifier; import java.util.HashMap; @@ -80,12 +78,10 @@ public void testMax() { public void testVersionConstantPresent() { assertThat(Version.CURRENT, sameInstance(Version.fromId(Version.CURRENT.id))); - assertThat(Version.CURRENT.luceneVersion(), equalTo(org.apache.lucene.util.Version.LATEST)); final int iters = scaledRandomIntBetween(20, 100); for (int i = 0; i < iters; i++) { Version version = randomVersion(random()); assertThat(version, sameInstance(Version.fromId(version.id))); - assertThat(version.luceneVersion(), sameInstance(Version.fromId(version.id).luceneVersion())); } } @@ -167,7 +163,7 @@ public void testParseVersion() { for (int i = 0; i < iters; i++) { Version version = randomVersion(random()); if (random().nextBoolean()) { - version = new Version(version.id, version.indexVersion); + version = new Version(version.id); } Version parsedVersion = Version.fromString(version.toString()); assertEquals(version, parsedVersion); @@ -181,15 +177,6 @@ public void testParseVersion() { expectThrows(IllegalArgumentException.class, () -> { Version.fromString("5.0.0-SNAPSHOT"); }); } - public void testParseLenient() { - // note this is just a silly sanity check, we test it in lucene - for (Version version : VersionUtils.allReleasedVersions()) { - org.apache.lucene.util.Version luceneVersion = version.luceneVersion(); - String string = luceneVersion.toString().toUpperCase(Locale.ROOT).replaceFirst("^LUCENE_(\\d+)_(\\d+)$", "$1.$2"); - assertThat(luceneVersion, Matchers.equalTo(Lucene.parseVersionLenient(string, null))); - } - } - public void testAllVersionsMatchId() throws Exception { final Set releasedVersions = new HashSet<>(VersionUtils.allReleasedVersions()); final Set unreleasedVersions = new HashSet<>(VersionUtils.allUnreleasedVersions()); @@ -237,25 +224,6 @@ public void testAllVersionsMatchId() throws Exception { } } - // this test ensures we never bump the lucene version in a bugfix release - public void testLuceneVersionIsSameOnMinorRelease() { - for 
(Version version : VersionUtils.allReleasedVersions()) { - for (Version other : VersionUtils.allReleasedVersions()) { - if (other.onOrAfter(version)) { - assertTrue( - "lucene versions must be " + other + " >= " + version, - other.luceneVersion().onOrAfter(version.luceneVersion()) - ); - } - if (other.major == version.major && other.minor == version.minor) { - assertEquals(version + " vs. " + other, other.luceneVersion().major, version.luceneVersion().major); - assertEquals(version + " vs. " + other, other.luceneVersion().minor, version.luceneVersion().minor); - // should we also assert the lucene bugfix version? - } - } - } - public static void assertUnknownVersion(Version version) { assertFalse( "Version " + version + " has been released, don't use a new instance of this version", diff --git a/server/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java b/server/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java index b181bc4f0e8b2..cf47930c1e5ca 100644 --- a/server/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java +++ b/server/src/test/java/org/elasticsearch/action/OriginalIndicesTests.java @@ -8,7 +8,7 @@ package org.elasticsearch.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -45,7 +45,8 @@ public void testOriginalIndicesSerialization() throws IOException { // indices options are not equivalent when sent to an older version and re-read due // to the addition of hidden indices as expand to hidden indices is always true when // read from a prior version - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0) || originalIndices.indicesOptions().expandWildcardsHidden()) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_7_0) + || originalIndices.indicesOptions().expandWildcardsHidden()) { assertThat(originalIndices2.indicesOptions(), equalTo(originalIndices.indicesOptions())); } else if (originalIndices.indicesOptions().expandWildcardsHidden()) { assertThat(originalIndices2.indicesOptions(), equalTo(originalIndices.indicesOptions())); diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestTests.java index 51ea15db67943..9cb47791d4f98 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/stats/NodesStatsRequestTests.java @@ -11,11 +11,15 @@ import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; import java.util.HashSet; +import java.util.Map; import java.util.Set; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.in; @@ -137,4 +141,28 @@ private static void assertRequestsEqual(NodesStatsRequest request1, NodesStatsRe assertThat(request1.indices().getFlags(), equalTo(request2.indices().getFlags())); assertThat(request1.requestedMetrics(),
equalTo(request2.requestedMetrics())); } + + public void testGetDescription() { + final var request = new NodesStatsRequest("nodeid1", "nodeid2"); + request.clear(); + request.addMetrics(NodesStatsRequest.Metric.OS.metricName(), NodesStatsRequest.Metric.TRANSPORT.metricName()); + request.indices(new CommonStatsFlags(CommonStatsFlags.Flag.Store, CommonStatsFlags.Flag.Flush)); + final var description = request.getDescription(); + + assertThat( + description, + allOf( + containsString("nodeid1"), + containsString("nodeid2"), + containsString(NodesStatsRequest.Metric.OS.metricName()), + containsString(NodesStatsRequest.Metric.TRANSPORT.metricName()), + not(containsString(NodesStatsRequest.Metric.SCRIPT.metricName())), + containsString(CommonStatsFlags.Flag.Store.toString()), + containsString(CommonStatsFlags.Flag.Flush.toString()), + not(containsString(CommonStatsFlags.Flag.FieldData.toString())) + ) + ); + + assertEquals(description, request.createTask(1, "", "", TaskId.EMPTY_TASK_ID, Map.of()).getDescription()); + } } diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java index 627cbd6039f1b..bb0e9977e3ac7 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TaskManagerTestCase.java @@ -119,12 +119,12 @@ protected NodesResponse(ClusterName clusterName, List nodes, List< @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeResponse::new); + return in.readCollectionAsList(NodeResponse::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } public int failureCount() { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java index 819e37a32bfe7..e8a3338559947 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/node/tasks/TestTaskPlugin.java @@ -141,12 +141,12 @@ public static class NodesResponse extends BaseNodesResponse implem @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeResponse::new); + return in.readCollectionAsList(NodeResponse::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } public int getFailureCount() { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java index ddc5e8bec61c1..d2a482fa58b0e 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java @@ -8,10 +8,11 @@ package org.elasticsearch.action.admin.cluster.reroute; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; 
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java
index ddc5e8bec61c1..d2a482fa58b0e 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/reroute/ClusterRerouteResponseTests.java
@@ -8,10 +8,11 @@
 package org.elasticsearch.action.admin.cluster.reroute;
 
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.ClusterStateTests;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.node.DiscoveryNodeUtils;
@@ -34,6 +35,7 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
+import java.util.Objects;
 
 import static org.elasticsearch.common.util.CollectionUtils.appendToCopy;
 import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder;
@@ -46,7 +48,7 @@ protected List filteredWarnings() {
     }
 
     public void testToXContent() throws IOException {
-        assertXContent(createClusterRerouteResponse(createClusterState()), new ToXContent.MapParams(Map.of("metric", "none")), 2, """
+        assertXContent(createClusterRerouteResponse(createClusterState()), new ToXContent.MapParams(Map.of("metric", "none")), """
            {
              "acknowledged": true
            }""");
@@ -57,7 +59,6 @@ public void testToXContentWithExplain() {
         assertXContent(
             createClusterRerouteResponse(clusterState),
             new ToXContent.MapParams(Map.of("explain", "true", "metric", "none")),
-            2,
             Strings.format("""
                {
                  "acknowledged": true,
@@ -87,7 +88,6 @@ public void testToXContentWithDeprecatedClusterState() {
         assertXContent(
             createClusterRerouteResponse(clusterState),
             ToXContent.EMPTY_PARAMS,
-            35,
             Strings.format(
                 """
                    {
@@ -132,6 +132,12 @@ public void testToXContentWithDeprecatedClusterState() {
                            "transport_version": "8000099"
                          }
                        ],
+                       "nodes_versions": [
+                         {
+                           "node_id": "node0",
+                           "transport_version": "8000099"
+                         }
+                       ],
                        "metadata": {
                          "cluster_uuid": "_na_",
                          "cluster_uuid_committed": false,
@@ -199,7 +205,7 @@ public void testToXContentWithDeprecatedClusterState() {
                 Version.CURRENT,
                 IndexVersion.MINIMUM_COMPATIBLE,
                 IndexVersion.current(),
-                Version.CURRENT.id
+                IndexVersion.current()
             ),
             """
                The [state] field in the response to the reroute API is deprecated and will be removed in a future version. \
@@ -211,7 +217,6 @@ public void testToXContentWithDeprecatedClusterStateAndMetadata() {
         assertXContent(
             createClusterRerouteResponse(createClusterState()),
             new ToXContent.MapParams(Map.of("metric", "metadata", "settings_filter", "index.number*,index.version.created")),
-            19,
             """
                {
                  "acknowledged" : true,
@@ -274,7 +279,6 @@ public void testToXContentWithDeprecatedClusterStateAndMetadata() {
     private void assertXContent(
         ClusterRerouteResponse response,
         ToXContent.Params params,
-        int expectedChunks,
         String expectedBody,
         String... criticalDeprecationWarnings
     ) {
@@ -289,6 +293,10 @@ private void assertXContent(
             throw new AssertionError("unexpected", e);
         }
 
+        final var expectedChunks = Objects.equals(params.param("metric"), "none")
+            ? 2
+            : 4 + ClusterStateTests.expectedChunkCount(params, response.getState());
+
         AbstractChunkedSerializingTestCase.assertChunkCount(response, params, ignored -> expectedChunks);
         assertCriticalWarnings(criticalDeprecationWarnings);
 
@@ -320,7 +328,7 @@ private static ClusterState createClusterState() {
         var node0 = DiscoveryNodeUtils.create("node0", new TransportAddress(TransportAddress.META_ADDRESS, 9000));
         return ClusterState.builder(new ClusterName("test"))
             .nodes(new DiscoveryNodes.Builder().add(node0).masterNodeId(node0.getId()).build())
-            .putTransportVersion(node0.getId(), TransportVersion.V_8_0_0)
+            .putTransportVersion(node0.getId(), TransportVersions.V_8_0_0)
             .metadata(
                 Metadata.builder()
                     .put(
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java
index e4a9cb17f4d9a..76abc4733d3ff 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/shards/ClusterSearchShardsRequestTests.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.action.admin.cluster.shards;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -55,7 +56,7 @@ public void testSerialization() throws Exception {
                 // indices options are not equivalent when sent to an older version and re-read due
                 // to the addition of hidden indices as expand to hidden indices is always true when
                 // read from a prior version
-                if (version.onOrAfter(TransportVersion.V_7_7_0) || request.indicesOptions().expandWildcardsHidden()) {
+                if (version.onOrAfter(TransportVersions.V_7_7_0) || request.indicesOptions().expandWildcardsHidden()) {
                     assertEquals(request.indicesOptions(), deserialized.indicesOptions());
                 }
                 assertEquals(request.routing(), deserialized.routing());
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java
index 2d261ed2a72b4..f64f6a5d8275b 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponseTests.java
@@ -13,6 +13,7 @@
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.snapshots.Snapshot;
 import org.elasticsearch.snapshots.SnapshotFeatureInfo;
@@ -60,7 +61,7 @@ private GetSnapshotsResponse copyInstance(GetSnapshotsResponse instance) throws
         return copyInstance(
             instance,
             new NamedWriteableRegistry(Collections.emptyList()),
-            (out, value) -> value.writeTo(out),
+            StreamOutput::writeWriteable,
             GetSnapshotsResponse::new,
             TransportVersion.current()
         );
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotResponseSerializationTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotResponseSerializationTests.java
index 7f00ff4a6bb3d..79d047c18189b 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotResponseSerializationTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/snapshots/get/shard/GetShardSnapshotResponseSerializationTests.java
@@ -12,6 +12,7 @@
 import org.elasticsearch.TransportVersion;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
+import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.index.shard.ShardId;
@@ -54,7 +55,7 @@ private GetShardSnapshotResponse copyInstance(GetShardSnapshotResponse instance)
         return copyInstance(
             instance,
             new NamedWriteableRegistry(Collections.emptyList()),
-            (out, value) -> value.writeTo(out),
+            StreamOutput::writeWriteable,
             GetShardSnapshotResponse::new,
             TransportVersion.current()
         );
diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java
index 7fb9d6f19e35c..17d4642ea45be 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/state/ClusterStateRequestTests.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.action.admin.cluster.state;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -37,11 +38,11 @@ public void testSerialization() throws Exception {
 
             TransportVersion testVersion = TransportVersionUtils.randomVersionBetween(
                 random(),
-                TransportVersion.MINIMUM_COMPATIBLE,
+                TransportVersions.MINIMUM_COMPATIBLE,
                 TransportVersion.current()
             );
             // TODO: change version to V_6_6_0 after backporting:
-            if (testVersion.onOrAfter(TransportVersion.V_7_0_0)) {
+            if (testVersion.onOrAfter(TransportVersions.V_7_0_0)) {
                 if (randomBoolean()) {
                     clusterStateRequest.waitForMetadataVersion(randomLongBetween(1, Long.MAX_VALUE));
                 }
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java
index 914e8db1caa1b..dc2f5f019e28b 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/close/CloseIndexRequestTests.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.action.admin.indices.close;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
@@ -55,10 +56,11 @@ public void testBwcSerialization() throws Exception {
                 // to the addition of hidden indices as expand to hidden indices is always true when
                 // read from a prior version
                 // TODO update version on backport!
-                if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0) || request.indicesOptions().expandWildcardsHidden()) {
+                if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_7_0)
+                    || request.indicesOptions().expandWildcardsHidden()) {
                     assertEquals(request.indicesOptions(), indicesOptions);
                 }
-                if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_2_0)) {
+                if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) {
                     assertEquals(request.waitForActiveShards(), ActiveShardCount.readFrom(in));
                 } else {
                     assertEquals(0, in.available());
@@ -76,7 +78,7 @@ public void testBwcSerialization() throws Exception {
                 out.writeTimeValue(sample.timeout());
                 out.writeStringArray(sample.indices());
                 sample.indicesOptions().writeIndicesOptions(out);
-                if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_2_0)) {
+                if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) {
                     sample.waitForActiveShards().writeTo(out);
                 }
 
@@ -93,10 +95,10 @@ public void testBwcSerialization() throws Exception {
                 // to the addition of hidden indices as expand to hidden indices is always true when
                 // read from a prior version
                 // TODO change version on backport
-                if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0) || sample.indicesOptions().expandWildcardsHidden()) {
+                if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_7_0) || sample.indicesOptions().expandWildcardsHidden()) {
                     assertEquals(sample.indicesOptions(), deserializedRequest.indicesOptions());
                 }
-                if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_2_0)) {
+                if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) {
                     assertEquals(sample.waitForActiveShards(), deserializedRequest.waitForActiveShards());
                 } else {
                     assertEquals(ActiveShardCount.NONE, deserializedRequest.waitForActiveShards());
diff --git a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java
index 32554a65d11d3..4c86faecd7aa6 100644
--- a/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/admin/indices/resolve/TransportResolveIndexActionTests.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.action.admin.indices.resolve;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilter;
 import org.elasticsearch.action.support.ActionFilters;
@@ -49,7 +50,7 @@ public void testCCSCompatibilityCheck() throws Exception {
             .build();
         ActionFilters actionFilters = mock(ActionFilters.class);
         when(actionFilters.filters()).thenReturn(new ActionFilter[0]);
-        TransportVersion transportVersion = TransportVersionUtils.getNextVersion(TransportVersion.MINIMUM_CCS_VERSION, true);
+        TransportVersion transportVersion = TransportVersionUtils.getNextVersion(TransportVersions.MINIMUM_CCS_VERSION, true);
         try {
             TransportService transportService = MockTransportService.createNewService(
                 Settings.EMPTY,
diff --git a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java
index aa4ad9824fd0f..09f79d1e48d0d 100644
--- a/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java
+++ b/server/src/test/java/org/elasticsearch/action/bulk/BulkProcessorTests.java
@@ -26,6 +26,7 @@
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.threadpool.ScheduledExecutorServiceScheduler;
 import org.elasticsearch.threadpool.Scheduler;
 import org.elasticsearch.threadpool.TestThreadPool;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -39,6 +40,7 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executor;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
@@ -225,7 +227,7 @@ public void testConcurrentExecutions() throws Exception {
             maxBatchSize,
             ByteSizeValue.ofBytes(Integer.MAX_VALUE),
             null,
-            (command, delay, executor) -> null,
+            UnusedScheduler.INSTANCE,
             () -> called.set(true),
             BulkRequest::new
         )
@@ -344,9 +346,7 @@ public void testConcurrentExecutionsWithFlush() throws Exception {
             maxBatchSize,
             ByteSizeValue.ofBytes(Integer.MAX_VALUE),
             TimeValue.timeValueMillis(simulateWorkTimeInMillis * 2),
-            (command, delay, executor) -> Scheduler.wrapAsScheduledCancellable(
-                flushExecutor.schedule(command, delay.millis(), TimeUnit.MILLISECONDS)
-            ),
+            new ScheduledExecutorServiceScheduler(flushExecutor),
             () -> {
                 flushExecutor.shutdown();
                 try {
@@ -447,7 +447,7 @@ public void testAwaitOnCloseCallsOnClose() throws Exception {
             10,
             ByteSizeValue.ofBytes(1000),
             null,
-            (command, delay, executor) -> null,
+            UnusedScheduler.INSTANCE,
             () -> called.set(true),
             BulkRequest::new
         );
@@ -546,4 +546,13 @@ public void afterBulk(long executionId, BulkRequest request, Throwable failure)
     private DocWriteResponse mockResponse() {
         return new IndexResponse(new ShardId("index", "uid", 0), "id", 1, 1, 1, true);
     }
+
+    private static class UnusedScheduler implements Scheduler {
+        static UnusedScheduler INSTANCE = new UnusedScheduler();
+
+        @Override
+        public ScheduledCancellable schedule(Runnable command, TimeValue delay, Executor executor) {
+            throw new AssertionError("should not be called");
+        }
+    }
 }
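Note: the BulkProcessor tests above replace hand-rolled scheduling lambdas with either the throwing UnusedScheduler stub or ScheduledExecutorServiceScheduler, which adapts a plain ScheduledExecutorService to Elasticsearch's Scheduler interface. A sketch of the adapter in isolation, under the same assumptions as the test (the printed message is illustrative only):

    ScheduledExecutorService flushExecutor = Executors.newScheduledThreadPool(1);
    // Adapts a JDK ScheduledExecutorService to the Scheduler interface.
    Scheduler scheduler = new ScheduledExecutorServiceScheduler(flushExecutor);
    // schedule(...) now takes the Executor that should run the command.
    Scheduler.ScheduledCancellable cancellable = scheduler.schedule(
        () -> System.out.println("flush"),
        TimeValue.timeValueMillis(100),
        EsExecutors.DIRECT_EXECUTOR_SERVICE
    );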
is("downsample-8h-test")); } diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeResponseTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeResponseTests.java index 64919cecbdcfc..a95340e2fffd1 100644 --- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeResponseTests.java +++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesNodeResponseTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.action.fieldcaps; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.UUIDs; @@ -99,7 +100,7 @@ public void testSerializeNodeResponseBetweenNewNodes() throws Exception { FieldCapabilitiesNodeResponse inNode = randomNodeResponse(indexResponses); final TransportVersion version = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_8_2_0, + TransportVersions.V_8_2_0, TransportVersion.current() ); final FieldCapabilitiesNodeResponse outNode = copyInstance(inNode, version); @@ -129,8 +130,8 @@ public void testSerializeNodeResponseBetweenNewNodes() throws Exception { } public void testSerializeNodeResponseBetweenOldNodes() throws IOException { - final TransportVersion minCompactVersion = TransportVersion.MINIMUM_COMPATIBLE; - assertTrue("Remove this test once minCompactVersion >= 8.2.0", minCompactVersion.before(TransportVersion.V_8_2_0)); + final TransportVersion minCompactVersion = TransportVersions.MINIMUM_COMPATIBLE; + assertTrue("Remove this test once minCompactVersion >= 8.2.0", minCompactVersion.before(TransportVersions.V_8_2_0)); List indexResponses = CollectionUtils.concatLists( randomIndexResponsesWithMappingHash(randomMappingHashToIndices()), randomIndexResponsesWithoutMappingHash() @@ -140,7 +141,7 @@ public void testSerializeNodeResponseBetweenOldNodes() throws IOException { TransportVersion version = TransportVersionUtils.randomVersionBetween( random(), minCompactVersion, - TransportVersionUtils.getPreviousVersion(TransportVersion.V_8_2_0) + TransportVersionUtils.getPreviousVersion(TransportVersions.V_8_2_0) ); final FieldCapabilitiesNodeResponse outResponse = copyInstance(inResponse, version); assertThat(outResponse.getFailures().keySet(), equalTo(inResponse.getFailures().keySet())); @@ -154,7 +155,7 @@ public void testSerializeNodeResponseBetweenOldNodes() throws IOException { assertThat(outList.get(i).canMatch(), equalTo(inList.get(i).canMatch())); Map outCap = outList.get(i).get(); Map inCap = inList.get(i).get(); - if (version.onOrAfter(TransportVersion.V_8_0_0)) { + if (version.onOrAfter(TransportVersions.V_8_0_0)) { assertThat(outCap, equalTo(inCap)); } else { // Exclude metric types which was introduced in 8.0 @@ -177,7 +178,7 @@ public void testReadNodeResponseFromPre82() throws Exception { + "RleF8wMgAACGluZGV4XzAzAgdfc2VxX25vB19zZXFfbm8EbG9uZwEBAQAAAAx5ZWxsb3dfZmllbGQMeWVsbG93X2ZpZWxkB2tleXdvcmQAAQEAAAABAAEI" + "aW5kZXhfMTAGdXVpZF9hAQ=="; StreamInput in = StreamInput.wrap(Base64.getDecoder().decode(base64)); - in.setTransportVersion(TransportVersion.V_8_1_0); + in.setTransportVersion(TransportVersions.V_8_1_0); FieldCapabilitiesNodeResponse nodeResp = new FieldCapabilitiesNodeResponse(in); assertThat(nodeResp.getUnmatchedShardIds(), equalTo(Set.of(new ShardId("index_10", "uuid_a", 1)))); assertThat(nodeResp.getFailures(), anEmptyMap()); diff --git 
diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java
index 3735bd8242ee2..cc7c76553ef99 100644
--- a/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java
+++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/FieldCapabilitiesResponseTests.java
@@ -10,6 +10,7 @@
 import org.elasticsearch.ElasticsearchExceptionTests;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.Version;
 import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -169,7 +170,7 @@ public void testSerializeCCSResponseBetweenNewClusters() throws Exception {
         FieldCapabilitiesResponse inResponse = randomCCSResponse(indexResponses);
         final TransportVersion version = TransportVersionUtils.randomVersionBetween(
             random(),
-            TransportVersion.V_8_2_0,
+            TransportVersions.V_8_2_0,
             TransportVersion.current()
         );
         final FieldCapabilitiesResponse outResponse = copyInstance(inResponse, version);
@@ -201,8 +202,8 @@ public void testSerializeCCSResponseBetweenNewClusters() throws Exception {
     }
 
     public void testSerializeCCSResponseBetweenOldClusters() throws IOException {
-        TransportVersion minCompactVersion = TransportVersion.MINIMUM_COMPATIBLE;
-        assertTrue("Remove this test once minCompactVersion >= 8.2.0", minCompactVersion.before(TransportVersion.V_8_2_0));
+        TransportVersion minCompactVersion = TransportVersions.MINIMUM_COMPATIBLE;
+        assertTrue("Remove this test once minCompactVersion >= 8.2.0", minCompactVersion.before(TransportVersions.V_8_2_0));
         List indexResponses = CollectionUtils.concatLists(
             randomIndexResponsesWithMappingHash(randomMappingHashToIndices()),
             randomIndexResponsesWithoutMappingHash()
@@ -212,7 +213,7 @@ public void testSerializeCCSResponseBetweenOldClusters() throws IOException {
         TransportVersion version = TransportVersionUtils.randomVersionBetween(
             random(),
             minCompactVersion,
-            TransportVersionUtils.getPreviousVersion(TransportVersion.V_8_2_0)
+            TransportVersionUtils.getPreviousVersion(TransportVersions.V_8_2_0)
         );
         final FieldCapabilitiesResponse outResponse = copyInstance(inResponse, version);
         assertThat(
@@ -228,7 +229,7 @@ public void testSerializeCCSResponseBetweenOldClusters() throws IOException {
             assertThat(outList.get(i).canMatch(), equalTo(inList.get(i).canMatch()));
             Map outCap = outList.get(i).get();
             Map inCap = inList.get(i).get();
-            if (version.onOrAfter(TransportVersion.V_8_0_0)) {
+            if (version.onOrAfter(TransportVersions.V_8_0_0)) {
                 assertThat(outCap, equalTo(inCap));
             } else {
                 // Exclude metric types which was introduced in 8.0
@@ -251,7 +252,7 @@ public void testReadCCSResponseFromPre82() throws Exception {
             + "GluZGV4XzAyAAAIaW5kZXhfMDMCDHllbGxvd19maWVsZAx5ZWxsb3dfZmllbGQHa2V5d29yZAABAQAAAAdfc2VxX25vB19zZXFfbm8EbG9uZwEBAQAAAA"
             + "EAAAAAAAAAAAA=";
         StreamInput in = StreamInput.wrap(Base64.getDecoder().decode(base64));
-        in.setTransportVersion(TransportVersion.V_8_1_0);
+        in.setTransportVersion(TransportVersions.V_8_1_0);
         FieldCapabilitiesResponse nodeResp = new FieldCapabilitiesResponse(in);
         assertThat(nodeResp.getFailures(), empty());
         assertThat(
diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/ResponseRewriterTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/ResponseRewriterTests.java
index 9264db0c5aefc..678f278d452b1 100644
--- a/server/src/test/java/org/elasticsearch/action/fieldcaps/ResponseRewriterTests.java
+++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/ResponseRewriterTests.java
@@ -8,7 +8,7 @@
 package org.elasticsearch.action.fieldcaps;
 
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.test.ESTestCase;
 
@@ -26,7 +26,7 @@ public void testExcludeMetadata() {
         );
 
         Map rewritten = ResponseRewriter.rewriteOldResponses(
-            TransportVersion.V_8_0_0,
+            TransportVersions.V_8_0_0,
             oldResponse,
             new String[] { "-metadata" },
             Strings.EMPTY_ARRAY
@@ -45,7 +45,7 @@ public void testIncludeOnlyMetadata() {
         );
 
         Map rewritten = ResponseRewriter.rewriteOldResponses(
-            TransportVersion.V_8_0_0,
+            TransportVersions.V_8_0_0,
             oldResponse,
             new String[] { "+metadata" },
             Strings.EMPTY_ARRAY
@@ -66,7 +66,7 @@ public void testExcludeNested() {
         );
 
         Map rewritten = ResponseRewriter.rewriteOldResponses(
-            TransportVersion.V_8_0_0,
+            TransportVersions.V_8_0_0,
             oldResponse,
             new String[] { "-nested" },
             Strings.EMPTY_ARRAY
@@ -90,7 +90,7 @@ public void testExcludeMultifield() {
         );
 
         Map rewritten = ResponseRewriter.rewriteOldResponses(
-            TransportVersion.V_8_0_0,
+            TransportVersions.V_8_0_0,
             oldResponse,
             new String[] { "-multifield" },
             Strings.EMPTY_ARRAY
@@ -112,7 +112,7 @@ public void testExcludeParents() {
         );
 
         Map rewritten = ResponseRewriter.rewriteOldResponses(
-            TransportVersion.V_8_0_0,
+            TransportVersions.V_8_0_0,
             oldResponse,
             new String[] { "-parent" },
             Strings.EMPTY_ARRAY
@@ -134,7 +134,7 @@ public void testAllowedTypes() {
         );
 
         Map rewritten = ResponseRewriter.rewriteOldResponses(
-            TransportVersion.V_8_0_0,
+            TransportVersions.V_8_0_0,
             oldResponse,
             Strings.EMPTY_ARRAY,
             new String[] { "text", "keyword" }
diff --git a/server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java b/server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java
index bfae7d7c3bf4f..6b0fea6271f5e 100644
--- a/server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/fieldcaps/TransportFieldCapabilitiesActionTests.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.action.fieldcaps;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.support.ActionFilter;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.PlainActionFuture;
@@ -51,7 +52,7 @@ public void testCCSCompatibilityCheck() throws Exception {
             .build();
         ActionFilters actionFilters = mock(ActionFilters.class);
         when(actionFilters.filters()).thenReturn(new ActionFilter[0]);
-        TransportVersion transportVersion = TransportVersionUtils.getNextVersion(TransportVersion.MINIMUM_CCS_VERSION, true);
+        TransportVersion transportVersion = TransportVersionUtils.getNextVersion(TransportVersions.MINIMUM_CCS_VERSION, true);
         try {
             TransportService transportService = MockTransportService.createNewService(
                 Settings.EMPTY,
diff --git a/server/src/test/java/org/elasticsearch/action/get/GetRequestTests.java b/server/src/test/java/org/elasticsearch/action/get/GetRequestTests.java
index 4f62566b4a0a6..ee11674fb2794 100644
--- a/server/src/test/java/org/elasticsearch/action/get/GetRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/get/GetRequestTests.java
@@ -7,7 +7,7 @@
  */
 package org.elasticsearch.action.get;
 
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -41,7 +41,7 @@ public void testForceSyntheticUnsupported() {
         GetRequest request = new GetRequest("index", "id");
         request.setForceSyntheticSource(true);
         StreamOutput out = new BytesStreamOutput();
-        out.setTransportVersion(TransportVersion.V_8_3_0);
+        out.setTransportVersion(TransportVersions.V_8_3_0);
         Exception e = expectThrows(IllegalArgumentException.class, () -> request.writeTo(out));
         assertEquals(e.getMessage(), "force_synthetic_source is not supported before 8.4.0");
     }
diff --git a/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java b/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java
index 15e3c95591d6e..eb9cfa4a6939c 100644
--- a/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/get/MultiGetRequestTests.java
@@ -8,7 +8,7 @@
 package org.elasticsearch.action.get;
 
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
@@ -124,7 +124,7 @@ public void testForceSyntheticUnsupported() {
         MultiGetRequest request = createTestInstance();
         request.setForceSyntheticSource(true);
         StreamOutput out = new BytesStreamOutput();
-        out.setTransportVersion(TransportVersion.V_8_3_0);
+        out.setTransportVersion(TransportVersions.V_8_3_0);
         Exception e = expectThrows(IllegalArgumentException.class, () -> request.writeTo(out));
         assertEquals(e.getMessage(), "force_synthetic_source is not supported before 8.4.0");
     }
diff --git a/server/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java b/server/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java
index 2f76127b88e26..e2b58ca2d2f1b 100644
--- a/server/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/get/MultiGetShardRequestTests.java
@@ -8,7 +8,7 @@
 package org.elasticsearch.action.get;
 
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -102,7 +102,7 @@ protected MultiGetShardRequest mutateInstance(MultiGetShardRequest instance) thr
     public void testForceSyntheticUnsupported() {
         MultiGetShardRequest request = createTestInstance(true);
         StreamOutput out = new BytesStreamOutput();
-        out.setTransportVersion(TransportVersion.V_8_3_0);
+        out.setTransportVersion(TransportVersions.V_8_3_0);
         Exception e = expectThrows(IllegalArgumentException.class, () -> request.writeTo(out));
         assertEquals(e.getMessage(), "force_synthetic_source is not supported before 8.4.0");
     }
diff --git a/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java b/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java
index 8800b7739c936..203f2b74c5c4f 100644
--- a/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/index/IndexRequestTests.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.action.index;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.DocWriteRequest;
@@ -224,8 +225,8 @@ public void testSerializeDynamicTemplates() throws Exception {
             indexRequest.setDynamicTemplates(dynamicTemplates);
             TransportVersion ver = TransportVersionUtils.randomVersionBetween(
                 random(),
-                TransportVersion.V_7_0_0,
-                TransportVersionUtils.getPreviousVersion(TransportVersion.V_7_13_0)
+                TransportVersions.V_7_0_0,
+                TransportVersionUtils.getPreviousVersion(TransportVersions.V_7_13_0)
             );
             BytesStreamOutput out = new BytesStreamOutput();
             out.setTransportVersion(ver);
@@ -243,7 +244,7 @@ public void testSerializeDynamicTemplates() throws Exception {
             indexRequest.setDynamicTemplates(dynamicTemplates);
             TransportVersion ver = TransportVersionUtils.randomVersionBetween(
                 random(),
-                TransportVersion.V_7_13_0,
+                TransportVersions.V_7_13_0,
                 TransportVersion.current()
             );
             BytesStreamOutput out = new BytesStreamOutput();
diff --git a/server/src/test/java/org/elasticsearch/action/search/OpenPointInTimeRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/OpenPointInTimeRequestTests.java
index 6b7b1ab63653d..7e1e7de03e288 100644
--- a/server/src/test/java/org/elasticsearch/action/search/OpenPointInTimeRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/OpenPointInTimeRequestTests.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.action.search;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -95,9 +96,9 @@ protected OpenPointInTimeRequest mutateInstance(OpenPointInTimeRequest in) throw
     }
 
     public void testUseDefaultConcurrentForOldVersion() throws Exception {
-        TransportVersion previousVersion = TransportVersionUtils.getPreviousVersion(TransportVersion.V_8_500_017);
+        TransportVersion previousVersion = TransportVersionUtils.getPreviousVersion(TransportVersions.V_8_500_020);
         try (BytesStreamOutput output = new BytesStreamOutput()) {
-            TransportVersion version = TransportVersionUtils.randomVersionBetween(random(), TransportVersion.V_8_0_0, previousVersion);
+            TransportVersion version = TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_8_0_0, previousVersion);
             output.setTransportVersion(version);
             OpenPointInTimeRequest original = createTestInstance();
             original.writeTo(output);
diff --git a/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java b/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java
index d1a60e7079884..f18d69c442b4b 100644
--- a/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/QueryPhaseResultConsumerTests.java
@@ -92,7 +92,13 @@ public void testProgressListenerExceptionsAreCaught() throws Exception {
         for (int i = 0; i < 10; i++) {
             searchShards.add(new SearchShard(null, new ShardId("index", "uuid", i)));
         }
-        searchProgressListener.notifyListShards(searchShards, Collections.emptyList(), SearchResponse.Clusters.EMPTY, false);
+        long timestamp = randomLongBetween(1000, Long.MAX_VALUE - 1000);
+        TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(
+            timestamp,
+            timestamp,
+            () -> timestamp + 1000
+        );
+        searchProgressListener.notifyListShards(searchShards, Collections.emptyList(), SearchResponse.Clusters.EMPTY, false, timeProvider);
 
         SearchRequest searchRequest = new SearchRequest("index");
         searchRequest.setBatchedReduceSize(2);
@@ -142,13 +148,14 @@ protected void onListShards(
             List shards,
             List skippedShards,
             SearchResponse.Clusters clusters,
-            boolean fetchPhase
+            boolean fetchPhase,
+            TransportSearchAction.SearchTimeProvider timeProvider
         ) {
             throw new UnsupportedOperationException();
         }
 
         @Override
-        protected void onQueryResult(int shardIndex) {
+        protected void onQueryResult(int shardIndex, QuerySearchResult queryResult) {
             onQueryResult.incrementAndGet();
             throw new UnsupportedOperationException();
         }
diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java
index 7e0add263618b..93436ed9b0768 100644
--- a/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/SearchPhaseControllerTests.java
@@ -1128,7 +1128,7 @@ public void testProgressListener() throws Exception {
         AtomicReference totalHitsListener = new AtomicReference<>();
         SearchProgressListener progressListener = new SearchProgressListener() {
             @Override
-            public void onQueryResult(int shardIndex) {
+            public void onQueryResult(int shardIndex, QuerySearchResult queryResult) {
                 assertThat(shardIndex, lessThan(expectedNumResults));
                 numQueryResultListener.incrementAndGet();
             }
diff --git a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java
index aa0c7ad6d499a..8005f2f412699 100644
--- a/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/SearchRequestTests.java
@@ -9,6 +9,7 @@
 package org.elasticsearch.action.search;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.common.Strings;
@@ -117,7 +118,7 @@ public void testSerializationMultiKNN() throws Exception {
                 searchRequest,
                 namedWriteableRegistry,
                 SearchRequest::new,
-                TransportVersionUtils.randomVersionBetween(random(), TransportVersion.V_8_4_0, TransportVersion.V_8_6_0)
+                TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_8_4_0, TransportVersions.V_8_6_0)
             )
         );
 
@@ -130,30 +131,30 @@ public void testSerializationMultiKNN() throws Exception {
             searchRequest,
             namedWriteableRegistry,
             SearchRequest::new,
-            TransportVersionUtils.randomVersionBetween(random(), TransportVersion.V_8_4_0, TransportVersion.V_8_6_0)
+            TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_8_4_0, TransportVersions.V_8_6_0)
         );
     }
 
     public void testRandomVersionSerialization() throws IOException {
         SearchRequest searchRequest = createSearchRequest();
         TransportVersion version = TransportVersionUtils.randomVersion(random());
-        if (version.before(TransportVersion.V_7_11_0) && searchRequest.source() != null) {
+        if (version.before(TransportVersions.V_7_11_0) && searchRequest.source() != null) {
             // Versions before 7.11.0 don't support runtime mappings
             searchRequest.source().runtimeMappings(emptyMap());
         }
-        if (version.before(TransportVersion.V_8_4_0)) {
+        if (version.before(TransportVersions.V_8_4_0)) {
            // Versions before 8.4.0 don't support force_synthetic_source
             searchRequest.setForceSyntheticSource(false);
         }
-        if (version.before(TransportVersion.V_8_7_0) && searchRequest.hasKnnSearch() && searchRequest.source().knnSearch().size() > 1) {
+        if (version.before(TransportVersions.V_8_7_0) && searchRequest.hasKnnSearch() && searchRequest.source().knnSearch().size() > 1) {
             // Versions before 8.7.0 don't support more than one KNN clause
             searchRequest.source().knnSearch(List.of(searchRequest.source().knnSearch().get(0)));
         }
-        if (version.before(TransportVersion.V_8_8_0) && searchRequest.source() != null) {
+        if (version.before(TransportVersions.V_8_8_0) && searchRequest.source() != null) {
             // Versions before 8.8 don't support rank
             searchRequest.source().rankBuilder(null);
         }
-        if (version.before(TransportVersion.V_8_500_013) && searchRequest.source() != null) {
+        if (version.before(TransportVersions.V_8_500_020) && searchRequest.source() != null) {
             // Versions before 8_500_999 don't support queries
             searchRequest.source().subSearches(new ArrayList<>());
         }
@@ -534,7 +535,7 @@ public void testForceSyntheticUnsupported() {
         SearchRequest request = new SearchRequest();
         request.setForceSyntheticSource(true);
         StreamOutput out = new BytesStreamOutput();
-        out.setTransportVersion(TransportVersion.V_8_3_0);
+        out.setTransportVersion(TransportVersions.V_8_3_0);
         Exception e = expectThrows(IllegalArgumentException.class, () -> request.writeTo(out));
         assertEquals(e.getMessage(), "force_synthetic_source is not supported before 8.4.0");
     }
{ "*" }, IndicesOptions.LENIENT_EXPAND_OPEN)); + + var c = new SearchResponse.Clusters(null, remoteClusterIndices, randomBoolean(), alias -> randomBoolean()); + assertTrue(c.hasRemoteClusters()); + } + + { + OriginalIndices localIndices = new OriginalIndices(new String[] { "foo*" }, IndicesOptions.LENIENT_EXPAND_OPEN); + + Map remoteClusterIndices = new HashMap<>(); + remoteClusterIndices.put("remote1", new OriginalIndices(new String[] { "*" }, IndicesOptions.LENIENT_EXPAND_OPEN)); + + var c = new SearchResponse.Clusters(localIndices, remoteClusterIndices, randomBoolean(), alias -> randomBoolean()); + assertTrue(c.hasRemoteClusters()); + } + + { + OriginalIndices localIndices = new OriginalIndices(new String[] { "foo*" }, IndicesOptions.LENIENT_EXPAND_OPEN); + + Map remoteClusterIndices = new HashMap<>(); + remoteClusterIndices.put("remote1", new OriginalIndices(new String[] { "*" }, IndicesOptions.LENIENT_EXPAND_OPEN)); + remoteClusterIndices.put("remote2", new OriginalIndices(new String[] { "a*" }, IndicesOptions.LENIENT_EXPAND_OPEN)); + remoteClusterIndices.put("remote3", new OriginalIndices(new String[] { "b*" }, IndicesOptions.LENIENT_EXPAND_OPEN)); + + var c = new SearchResponse.Clusters(localIndices, remoteClusterIndices, randomBoolean(), alias -> randomBoolean()); + assertTrue(c.hasRemoteClusters()); + } + } } diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java index ca55483c1e7e0..dae91fe883a30 100644 --- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java @@ -13,6 +13,7 @@ import org.apache.lucene.search.TotalHits; import org.apache.lucene.util.SetOnce; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.LatchedActionListener; import org.elasticsearch.action.OriginalIndices; @@ -108,7 +109,6 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiFunction; import java.util.function.Function; @@ -523,14 +523,12 @@ public void testCCSRemoteReduceMergeFails() throws Exception { ActionListener.wrap(r -> fail("no response expected"), failure::set), latch ); - SearchResponse.Clusters initClusters = new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true); - TransportSearchAction.ccsRemoteReduce( new TaskId("n", 1), searchRequest, localIndices, remoteIndicesByCluster, - initClusters, + new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true, alias -> randomBoolean()), timeProvider, emptyReduceContextBuilder(), remoteClusterService, @@ -591,14 +589,12 @@ public void testCCSRemoteReduce() throws Exception { ActionTestUtils.assertNoFailureListener(response::set), latch ); - SearchResponse.Clusters initClusters = new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true); - TransportSearchAction.ccsRemoteReduce( new TaskId("n", 1), searchRequest, localIndices, remoteIndicesByCluster, - initClusters, + new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true, alias -> randomBoolean()), timeProvider, emptyReduceContextBuilder(), remoteClusterService, @@ -632,13 
diff --git a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java
index ca55483c1e7e0..dae91fe883a30 100644
--- a/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java
+++ b/server/src/test/java/org/elasticsearch/action/search/TransportSearchActionTests.java
@@ -13,6 +13,7 @@
 import org.apache.lucene.search.TotalHits;
 import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.LatchedActionListener;
 import org.elasticsearch.action.OriginalIndices;
@@ -108,7 +109,6 @@
 import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.BiFunction;
 import java.util.function.Function;
@@ -523,14 +523,12 @@ public void testCCSRemoteReduceMergeFails() throws Exception {
             ActionListener.wrap(r -> fail("no response expected"), failure::set),
             latch
         );
-        SearchResponse.Clusters initClusters = new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true);
-
         TransportSearchAction.ccsRemoteReduce(
             new TaskId("n", 1),
             searchRequest,
             localIndices,
             remoteIndicesByCluster,
-            initClusters,
+            new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true, alias -> randomBoolean()),
             timeProvider,
             emptyReduceContextBuilder(),
             remoteClusterService,
@@ -591,14 +589,12 @@ public void testCCSRemoteReduce() throws Exception {
             ActionTestUtils.assertNoFailureListener(response::set),
             latch
         );
-        SearchResponse.Clusters initClusters = new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true);
-
         TransportSearchAction.ccsRemoteReduce(
             new TaskId("n", 1),
             searchRequest,
             localIndices,
             remoteIndicesByCluster,
-            initClusters,
+            new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true, alias -> randomBoolean()),
             timeProvider,
             emptyReduceContextBuilder(),
             remoteClusterService,
@@ -632,13 +628,12 @@ public void testCCSRemoteReduce() throws Exception {
             ActionListener.wrap(r -> fail("no response expected"), failure::set),
             latch
         );
-        SearchResponse.Clusters initClusters = new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true);
         TransportSearchAction.ccsRemoteReduce(
             new TaskId("n", 1),
             searchRequest,
             localIndices,
             remoteIndicesByCluster,
-            initClusters,
+            new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true, alias -> randomBoolean()),
             timeProvider,
             emptyReduceContextBuilder(),
             remoteClusterService,
@@ -693,14 +688,12 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti
             ActionListener.wrap(r -> fail("no response expected"), failure::set),
             latch
         );
-        SearchResponse.Clusters initClusters = new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true);
-
         TransportSearchAction.ccsRemoteReduce(
             new TaskId("n", 1),
             searchRequest,
             localIndices,
             remoteIndicesByCluster,
-            initClusters,
+            new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true, alias -> randomBoolean()),
             timeProvider,
             emptyReduceContextBuilder(),
             remoteClusterService,
@@ -741,14 +734,12 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti
             if (localIndices != null) {
                 clusterAliases.add("");
             }
-            SearchResponse.Clusters initClusters = new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true);
-
             TransportSearchAction.ccsRemoteReduce(
                 new TaskId("n", 1),
                 searchRequest,
                 localIndices,
                 remoteIndicesByCluster,
-                initClusters,
+                new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true, alias -> randomBoolean()),
                 timeProvider,
                 emptyReduceContextBuilder(),
                 remoteClusterService,
@@ -801,14 +792,12 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti
             if (localIndices != null) {
                 clusterAliases.add("");
             }
-            SearchResponse.Clusters initClusters = new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true);
-
             TransportSearchAction.ccsRemoteReduce(
                 new TaskId("n", 1),
                 searchRequest,
                 localIndices,
                 remoteIndicesByCluster,
-                initClusters,
+                new SearchResponse.Clusters(localIndices, remoteIndicesByCluster, true, alias -> randomBoolean()),
                 timeProvider,
                 emptyReduceContextBuilder(),
                 remoteClusterService,
@@ -859,11 +848,12 @@ public void testCollectSearchShards() throws Exception {
         service.start();
         service.acceptIncomingRequests();
+        TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0);
         RemoteClusterService remoteClusterService = service.getRemoteClusterService();
         {
             final CountDownLatch latch = new CountDownLatch(1);
             AtomicReference> response = new AtomicReference<>();
-            AtomicInteger skippedClusters = new AtomicInteger();
+            var clusters = new SearchResponse.Clusters(null, remoteIndicesByCluster, false, clusterAlias -> true);
             TransportSearchAction.collectSearchShards(
                 IndicesOptions.lenientExpandOpen(),
                 null,
@@ -871,13 +861,13 @@ public void testCollectSearchShards() throws Exception {
                 new MatchAllQueryBuilder(),
                 randomBoolean(),
                 null,
-                skippedClusters,
                 remoteIndicesByCluster,
+                clusters,
+                timeProvider,
                 service,
                 new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(response::set), latch)
             );
             awaitLatch(latch, 5, TimeUnit.SECONDS);
-            assertEquals(0, skippedClusters.get());
             assertNotNull(response.get());
             Map map = response.get();
             assertEquals(numClusters, map.size());
@@ -887,11 +877,12 @@ public void testCollectSearchShards() throws Exception {
                 SearchShardsResponse shardsResponse = map.get(clusterAlias);
                 assertThat(shardsResponse.getNodes(), hasSize(1));
             }
+            assertThat(clusters.getSkipped(), equalTo(0));
         }
         {
             final CountDownLatch latch = new CountDownLatch(1);
             AtomicReference failure = new AtomicReference<>();
-            AtomicInteger skippedClusters = new AtomicInteger(0);
+            var clusters = new SearchResponse.Clusters(null, remoteIndicesByCluster, false, clusterAlias -> true);
             TransportSearchAction.collectSearchShards(
                 IndicesOptions.lenientExpandOpen(),
                 "index_not_found",
@@ -899,13 +890,14 @@ public void testCollectSearchShards() throws Exception {
                 new MatchAllQueryBuilder(),
                 randomBoolean(),
                 null,
-                skippedClusters,
                 remoteIndicesByCluster,
+                clusters,
+                timeProvider,
                 service,
                 new LatchedActionListener<>(ActionListener.wrap(r -> fail("no response expected"), failure::set), latch)
             );
             awaitLatch(latch, 5, TimeUnit.SECONDS);
-            assertEquals(0, skippedClusters.get());
+            assertEquals(numClusters, clusters.getSkipped());
             assertNotNull(failure.get());
             assertThat(failure.get(), instanceOf(RemoteTransportException.class));
             RemoteTransportException remoteTransportException = (RemoteTransportException) failure.get();
@@ -937,8 +929,8 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti
 
         {
             final CountDownLatch latch = new CountDownLatch(1);
-            AtomicInteger skippedClusters = new AtomicInteger(0);
             AtomicReference failure = new AtomicReference<>();
+            var clusters = new SearchResponse.Clusters(null, remoteIndicesByCluster, false, clusterAlias -> false);
             TransportSearchAction.collectSearchShards(
                 IndicesOptions.lenientExpandOpen(),
                 null,
@@ -946,13 +938,14 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti
                 new MatchAllQueryBuilder(),
                 randomBoolean(),
                 null,
-                skippedClusters,
                 remoteIndicesByCluster,
+                clusters,
+                timeProvider,
                 service,
                 new LatchedActionListener<>(ActionListener.wrap(r -> fail("no response expected"), failure::set), latch)
             );
             awaitLatch(latch, 5, TimeUnit.SECONDS);
-            assertEquals(0, skippedClusters.get());
+            assertEquals(numDisconnectedClusters, clusters.getSkipped());
             assertNotNull(failure.get());
             assertThat(failure.get(), instanceOf(RemoteTransportException.class));
             assertThat(failure.get().getMessage(), containsString("error while communicating with remote cluster ["));
@@ -966,8 +959,8 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti
 
         {
             final CountDownLatch latch = new CountDownLatch(1);
-            AtomicInteger skippedClusters = new AtomicInteger(0);
             AtomicReference> response = new AtomicReference<>();
+            var clusters = new SearchResponse.Clusters(null, remoteIndicesByCluster, false, clusterAlias -> true);
             TransportSearchAction.collectSearchShards(
                 IndicesOptions.lenientExpandOpen(),
                 null,
@@ -975,8 +968,9 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti
                 new MatchAllQueryBuilder(),
                 randomBoolean(),
                 null,
-                skippedClusters,
                 remoteIndicesByCluster,
+                clusters,
+                timeProvider,
                 service,
                 new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(response::set), latch)
             );
@@ -984,7 +978,7 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti
             assertNotNull(response.get());
             Map map = response.get();
             assertEquals(numClusters - disconnectedNodesIndices.size(), map.size());
-            assertEquals(skippedClusters.get(), disconnectedNodesIndices.size());
+            assertEquals(disconnectedNodesIndices.size(), clusters.getSkipped());
             for (int i = 0; i < numClusters; i++) {
                 String clusterAlias = "remote" + i;
                 if (disconnectedNodesIndices.contains(i)) {
@@ -1011,8 +1005,8 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti
         // run the following under assertBusy as connections are lazily reestablished
         assertBusy(() -> {
             final CountDownLatch latch = new CountDownLatch(1);
-            AtomicInteger skippedClusters = new AtomicInteger(0);
             AtomicReference> response = new AtomicReference<>();
+            var clusters = new SearchResponse.Clusters(null, remoteIndicesByCluster, false, clusterAlias -> true);
             TransportSearchAction.collectSearchShards(
                 IndicesOptions.lenientExpandOpen(),
                 null,
@@ -1020,13 +1014,14 @@ public void onNodeDisconnected(DiscoveryNode node, Transport.Connection connecti
                 new MatchAllQueryBuilder(),
                 randomBoolean(),
                 null,
-                skippedClusters,
                 remoteIndicesByCluster,
+                clusters,
+                timeProvider,
                 service,
                 new LatchedActionListener<>(ActionTestUtils.assertNoFailureListener(response::set), latch)
             );
             awaitLatch(latch, 5, TimeUnit.SECONDS);
-            assertEquals(0, skippedClusters.get());
+            assertEquals(0, clusters.getSkipped());
             assertNotNull(response.get());
             Map map = response.get();
             assertEquals(numClusters, map.size());
@@ -1490,7 +1485,7 @@ public void testLocalShardIteratorFromPointInTime() {
             true
         ).stream().filter(si -> si.shardId().equals(anotherShardId)).findFirst();
         assertTrue(anotherShardIterator.isPresent());
-        assertThat(anotherShardIterator.get().getTargetNodeIds(), hasSize(1));
+        assertThat(anotherShardIterator.get().getTargetNodeIds(), hasSize(0));
     }
 
     public void testCCSCompatibilityCheck() throws Exception {
@@ -1500,7 +1495,7 @@ public void testCCSCompatibilityCheck() throws Exception {
             .build();
         ActionFilters actionFilters = mock(ActionFilters.class);
         when(actionFilters.filters()).thenReturn(new ActionFilter[0]);
-        TransportVersion transportVersion = TransportVersionUtils.getNextVersion(TransportVersion.MINIMUM_CCS_VERSION, true);
+        TransportVersion transportVersion = TransportVersionUtils.getNextVersion(TransportVersions.MINIMUM_CCS_VERSION, true);
         ThreadPool threadPool = new ThreadPool(settings);
         try {
             TransportService transportService = MockTransportService.createNewService(
"test"); + ActionListener wrapped = ListenerTimeouts.wrapWithTimeout(threadPool, listener, timeout, timeoutExecutor, "test"); assertTrue(taskQueue.hasDeferredTasks()); taskQueue.advanceTime(); taskQueue.runAllRunnableTasks(); @@ -56,7 +60,7 @@ public void testFinishNormallyBeforeTimeout() { AtomicReference exception = new AtomicReference<>(); ActionListener listener = wrap(success, exception); - ActionListener wrapped = ListenerTimeouts.wrapWithTimeout(taskQueue.getThreadPool(), listener, timeout, generic, "test"); + ActionListener wrapped = ListenerTimeouts.wrapWithTimeout(threadPool, listener, timeout, timeoutExecutor, "test"); wrapped.onResponse(null); wrapped.onFailure(new IOException("boom")); wrapped.onResponse(null); @@ -74,7 +78,7 @@ public void testFinishExceptionallyBeforeTimeout() { AtomicReference exception = new AtomicReference<>(); ActionListener listener = wrap(success, exception); - ActionListener wrapped = ListenerTimeouts.wrapWithTimeout(taskQueue.getThreadPool(), listener, timeout, generic, "test"); + ActionListener wrapped = ListenerTimeouts.wrapWithTimeout(threadPool, listener, timeout, timeoutExecutor, "test"); wrapped.onFailure(new IOException("boom")); assertTrue(taskQueue.hasDeferredTasks()); diff --git a/server/src/test/java/org/elasticsearch/action/support/RetryableActionTests.java b/server/src/test/java/org/elasticsearch/action/support/RetryableActionTests.java index db1a5977ffa5a..fd1439f23240e 100644 --- a/server/src/test/java/org/elasticsearch/action/support/RetryableActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/RetryableActionTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; @@ -38,7 +39,8 @@ public void testRetryableActionNoRetries() { taskQueue.getThreadPool(), TimeValue.timeValueMillis(10), TimeValue.timeValueSeconds(30), - future + future, + EsExecutors.DIRECT_EXECUTOR_SERVICE ) { @Override @@ -69,7 +71,8 @@ public void testRetryableActionWillRetry() { taskQueue.getThreadPool(), TimeValue.timeValueMillis(10), TimeValue.timeValueSeconds(30), - future + future, + EsExecutors.DIRECT_EXECUTOR_SERVICE ) { @Override @@ -116,7 +119,8 @@ public void testRetryableActionTimeout() { taskQueue.getThreadPool(), TimeValue.timeValueMillis(randomFrom(1, 10, randomIntBetween(100, 2000))), TimeValue.timeValueSeconds(1), - future + future, + EsExecutors.DIRECT_EXECUTOR_SERVICE ) { @Override @@ -163,7 +167,8 @@ public void testTimeoutOfZeroMeansNoRetry() { taskQueue.getThreadPool(), TimeValue.timeValueMillis(10), TimeValue.timeValueSeconds(0), - future + future, + EsExecutors.DIRECT_EXECUTOR_SERVICE ) { @Override @@ -192,7 +197,8 @@ public void testFailedBecauseNotRetryable() { taskQueue.getThreadPool(), TimeValue.timeValueMillis(10), TimeValue.timeValueSeconds(30), - future + future, + EsExecutors.DIRECT_EXECUTOR_SERVICE ) { @Override @@ -221,7 +227,8 @@ public void testRetryableActionCancelled() { taskQueue.getThreadPool(), TimeValue.timeValueMillis(10), TimeValue.timeValueSeconds(30), - future + future, + EsExecutors.DIRECT_EXECUTOR_SERVICE ) { @Override @@ -259,7 +266,8 @@ public void testMaxDelayBound() { TimeValue.timeValueMillis(10), TimeValue.timeValueMillis(50), 
TimeValue.timeValueSeconds(1), - future + future, + EsExecutors.DIRECT_EXECUTOR_SERVICE ) { @Override
diff --git a/server/src/test/java/org/elasticsearch/action/support/SubscribableListenerTests.java b/server/src/test/java/org/elasticsearch/action/support/SubscribableListenerTests.java index bb2763e444e8e..91404b3c72cb7 100644 --- a/server/src/test/java/org/elasticsearch/action/support/SubscribableListenerTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/SubscribableListenerTests.java @@ -322,7 +322,11 @@ public void onFailure(Exception e) { }); try (var ignored = threadPool.getThreadContext().stashContext()) { threadPool.getThreadContext().putHeader(headerName, headerValue); - listener.addTimeout(TimeValue.timeValueSeconds(30), threadPool, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC)); + listener.addTimeout( + TimeValue.timeValueSeconds(30), + threadPool, + randomFrom(EsExecutors.DIRECT_EXECUTOR_SERVICE, threadPool.generic()) + ); } if (randomBoolean()) { @@ -359,7 +363,11 @@ public void onFailure(Exception e) { fail("should not fail"); } }); - listener.addTimeout(TimeValue.timeValueSeconds(30), threadPool, randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC)); + listener.addTimeout( + TimeValue.timeValueSeconds(30), + threadPool, + randomFrom(EsExecutors.DIRECT_EXECUTOR_SERVICE, threadPool.generic()) + ); deterministicTaskQueue.scheduleAt( deterministicTaskQueue.getCurrentTimeMillis() + randomLongBetween(0, TimeValue.timeValueSeconds(30).millis() - 1),
diff --git a/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java b/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java index 2e90efd216593..9a0fa2d32e25c 100644 --- a/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/nodes/TransportNodesActionTests.java @@ -384,12 +384,12 @@ private static class TestNodesResponse extends BaseNodesResponse<TestNodeResponse> { @Override protected List<TestNodeResponse> readNodesFrom(StreamInput in) throws IOException { - return in.readList(TestNodeResponse::new); + return in.readCollectionAsList(TestNodeResponse::new); } @Override protected void writeNodesTo(StreamOutput out, List<TestNodeResponse> nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } }
diff --git a/server/src/test/java/org/elasticsearch/action/support/replication/PendingReplicationActionsTests.java b/server/src/test/java/org/elasticsearch/action/support/replication/PendingReplicationActionsTests.java index 7bfc09fba0b70..73c1c1248d624 100644 --- a/server/src/test/java/org/elasticsearch/action/support/replication/PendingReplicationActionsTests.java +++ b/server/src/test/java/org/elasticsearch/action/support/replication/PendingReplicationActionsTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.RetryableAction; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.shard.IndexShardClosedException; import org.elasticsearch.index.shard.ShardId; @@ -91,7 +92,14 @@ private TestAction(ActionListener<Void> listener) { } private TestAction(ActionListener<Void> listener, boolean succeed) { - super(logger, threadPool, TimeValue.timeValueMillis(1), TimeValue.timeValueMinutes(1), listener); + super( + logger, + threadPool, + TimeValue.timeValueMillis(1), + TimeValue.timeValueMinutes(1), + listener, + 
EsExecutors.DIRECT_EXECUTOR_SERVICE ) { this.succeed = succeed; }
diff --git a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java index a06895e701ed3..0df492b080254 100644 --- a/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java +++ b/server/src/test/java/org/elasticsearch/action/termvectors/TermVectorsUnitTests.java @@ -25,7 +25,7 @@ import org.apache.lucene.search.TermQuery; import org.apache.lucene.search.TopDocs; import org.apache.lucene.store.Directory; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.termvectors.TermVectorsRequest.Flag; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -109,7 +109,7 @@ private void writeEmptyTermVector(TermVectorsResponse outResponse) throws IOExce TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1); ScoreDoc[] scoreDocs = search.scoreDocs; int doc = scoreDocs[0].doc; - Fields fields = dr.getTermVectors(doc); + Fields fields = s.getIndexReader().termVectors().get(doc); EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets); outResponse.setFields(fields, null, flags, fields); outResponse.setExists(true); @@ -144,7 +144,7 @@ private void writeStandardTermVector(TermVectorsResponse outResponse) throws IOE TopDocs search = s.search(new TermQuery(new Term("id", "abc")), 1); ScoreDoc[] scoreDocs = search.scoreDocs; int doc = scoreDocs[0].doc; - Fields termVectors = dr.getTermVectors(doc); + Fields termVectors = s.getIndexReader().termVectors().get(doc); EnumSet<Flag> flags = EnumSet.of(Flag.Positions, Flag.Offsets); outResponse.setFields(termVectors, null, flags, termVectors); dr.close(); @@ -261,7 +261,7 @@ public void testStreamRequestLegacyVersion() throws IOException { // write using older version which contains types ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); - out.setTransportVersion(TransportVersion.V_7_2_0); + out.setTransportVersion(TransportVersions.V_7_2_0); request.writeTo(out); // First check the type on the stream was written as "_doc" by manually parsing the stream until the type @@ -277,7 +277,7 @@ public void testStreamRequestLegacyVersion() throws IOException { // now read the stream as normal to check it is parsed correct if received from an older node esInBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); esBuffer = new InputStreamStreamInput(esInBuffer); - esBuffer.setTransportVersion(TransportVersion.V_7_2_0); + esBuffer.setTransportVersion(TransportVersions.V_7_2_0); TermVectorsRequest req2 = new TermVectorsRequest(esBuffer); assertThat(request.offsets(), equalTo(req2.offsets()));
diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java index 32211254f9082..e3c5865333b94 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterStateTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; import org.elasticsearch.cluster.block.ClusterBlock; @@ -21,20 +22,25 @@ import 
org.elasticsearch.cluster.metadata.IndexWriteLoad; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataTests; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.TestShardRouting; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.gateway.GatewayService; +import org.elasticsearch.health.metadata.HealthMetadata; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; @@ -54,6 +60,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.function.Function; import static java.util.Collections.emptySet; import static java.util.Collections.singletonMap; @@ -132,12 +139,7 @@ public void testToXContent() throws IOException { XContentBuilder builder = JsonXContent.contentBuilder(); builder.startObject(); - writeChunks( - clusterState, - builder, - new ToXContent.MapParams(singletonMap(Metadata.CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_API)), - 41 - ); + writeChunks(clusterState, builder, new ToXContent.MapParams(singletonMap(Metadata.CONTEXT_MODE_PARAM, Metadata.CONTEXT_MODE_API))); builder.endObject(); assertEquals( @@ -212,6 +214,12 @@ public void testToXContent() throws IOException { "transport_version" : "%s" } ], + "nodes_versions" : [ + { + "node_id" : "nodeId1", + "transport_version" : "%s" + } + ], "metadata": { "cluster_uuid": "clusterUUID", "cluster_uuid_committed": false, @@ -365,6 +373,7 @@ public void testToXContent() throws IOException { IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current(), TransportVersion.current(), + TransportVersion.current(), IndexVersion.current(), IndexVersion.current(), allocationId, @@ -393,7 +402,7 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); builder.startObject(); - writeChunks(clusterState, builder, new ToXContent.MapParams(mapParams), 41); + writeChunks(clusterState, builder, new ToXContent.MapParams(mapParams)); builder.endObject(); assertEquals( @@ -467,6 +476,12 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti "transport_version" : "%s" } ], + "nodes_versions" : [ + { + "node_id" : "nodeId1", + "transport_version" : "%s" + } + ], "metadata" : { "cluster_uuid" : "clusterUUID", "cluster_uuid_committed" : false, @@ -616,6 +631,7 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current(), TransportVersion.current(), + TransportVersion.current(), IndexVersion.current(), IndexVersion.current(), allocationId, @@ -644,7 +660,7 @@ public void 
testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); builder.startObject(); - writeChunks(clusterState, builder, new ToXContent.MapParams(mapParams), 41); + writeChunks(clusterState, builder, new ToXContent.MapParams(mapParams)); builder.endObject(); assertEquals( @@ -718,6 +734,12 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti "transport_version" : "%s" } ], + "nodes_versions" : [ + { + "node_id" : "nodeId1", + "transport_version" : "%s" + } + ], "metadata" : { "cluster_uuid" : "clusterUUID", "cluster_uuid_committed" : false, @@ -873,6 +895,7 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current(), TransportVersion.current(), + TransportVersion.current(), IndexVersion.current(), IndexVersion.current(), allocationId, @@ -919,7 +942,7 @@ public void testToXContentSameTypeName() throws IOException { XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint(); builder.startObject(); - writeChunks(clusterState, builder, ToXContent.EMPTY_PARAMS, 27); + writeChunks(clusterState, builder, ToXContent.EMPTY_PARAMS); builder.endObject(); assertEquals(Strings.format(""" @@ -931,6 +954,7 @@ public void testToXContentSameTypeName() throws IOException { "blocks" : { }, "nodes" : { }, "transport_versions" : [ ], + "nodes_versions" : [ ], "metadata" : { "cluster_uuid" : "clusterUUID", "cluster_uuid_committed" : false, @@ -989,7 +1013,7 @@ public void testToXContentSameTypeName() throws IOException { "unassigned" : [ ], "nodes" : { } } - }""", Version.CURRENT.id), Strings.toString(builder)); + }""", IndexVersion.current()), Strings.toString(builder)); } private ClusterState buildClusterState() throws IOException { @@ -1113,19 +1137,18 @@ public void testNodesIfRecovered() throws IOException { assertEquals(DiscoveryNodes.EMPTY_NODES, notRecoveredState.nodesIfRecovered()); } - private static void writeChunks(ClusterState clusterState, XContentBuilder builder, ToXContent.Params params, int expectedChunks) - throws IOException { + private static void writeChunks(ClusterState clusterState, XContentBuilder builder, ToXContent.Params params) throws IOException { final var iterator = clusterState.toXContentChunked(params); int chunks = 0; while (iterator.hasNext()) { iterator.next().toXContent(builder, params); chunks += 1; } - assertEquals(expectedChunks, chunks); + assertEquals(expectedChunkCount(params, clusterState), chunks); } public void testGetMinTransportVersion() throws IOException { - assertEquals(TransportVersion.MINIMUM_COMPATIBLE, ClusterState.EMPTY_STATE.getMinTransportVersion()); + assertEquals(TransportVersions.MINIMUM_COMPATIBLE, ClusterState.EMPTY_STATE.getMinTransportVersion()); var builder = ClusterState.builder(buildClusterState()); int numNodes = randomIntBetween(2, 20); @@ -1141,11 +1164,81 @@ public void testGetMinTransportVersion() throws IOException { assertThat(newState.getMinTransportVersion(), equalTo(minVersion)); assertEquals( - TransportVersion.MINIMUM_COMPATIBLE, + TransportVersions.MINIMUM_COMPATIBLE, ClusterState.builder(newState) .blocks(ClusterBlocks.builder().addGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK)) .build() .getMinTransportVersion() ); } + + public static int expectedChunkCount(ToXContent.Params params, ClusterState clusterState) { + final var metrics = ClusterState.Metric.parseString(params.param("metric", "_all"), true); + + int chunkCount = 
0; + + // header chunk + chunkCount += 1; + + // blocks + if (metrics.contains(ClusterState.Metric.BLOCKS)) { + chunkCount += 2 + clusterState.blocks().indices().size(); + } + + // nodes, transport_versions, nodes_versions + if (metrics.contains(ClusterState.Metric.NODES)) { + chunkCount += 6 + clusterState.nodes().size() + 2 * clusterState.compatibilityVersions().size(); + } + + // metadata + if (metrics.contains(ClusterState.Metric.METADATA)) { + chunkCount += MetadataTests.expectedChunkCount(params, clusterState.metadata()); + } + + // routing table + if (metrics.contains(ClusterState.Metric.ROUTING_TABLE)) { + chunkCount += 2; + for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) { + chunkCount += 2; + for (int shardId = 0; shardId < indexRoutingTable.size(); shardId++) { + chunkCount += 2 + indexRoutingTable.shard(shardId).size(); + } + } + } + + // routing nodes + if (metrics.contains(ClusterState.Metric.ROUTING_NODES)) { + final var routingNodes = clusterState.getRoutingNodes(); + chunkCount += 4 + routingNodes.unassigned().size(); + for (RoutingNode routingNode : routingNodes) { + chunkCount += 2 + routingNode.size(); + } + } + + // customs + if (metrics.contains(ClusterState.Metric.CUSTOMS)) { + for (ClusterState.Custom custom : clusterState.customs().values()) { + chunkCount += 2; + + if (custom instanceof HealthMetadata) { + chunkCount += 1; + } else if (custom instanceof RepositoryCleanupInProgress repositoryCleanupInProgress) { + chunkCount += 2 + repositoryCleanupInProgress.entries().size(); + } else if (custom instanceof RestoreInProgress restoreInProgress) { + chunkCount += 2 + Iterables.size(restoreInProgress); + } else if (custom instanceof SnapshotDeletionsInProgress snapshotDeletionsInProgress) { + chunkCount += 2 + snapshotDeletionsInProgress.getEntries().size(); + } else if (custom instanceof SnapshotsInProgress snapshotsInProgress) { + chunkCount += 2 + snapshotsInProgress.asStream().count(); + } else { + // could be anything, we have to just try it + chunkCount += Iterables.size( + (Iterable) (() -> Iterators.map(custom.toXContentChunked(params), Function.identity())) + ); + } + } + } + + return chunkCount; + } } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index 3bae8dc1f8589..3fc62981b75ba 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.core.LogEvent; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.SimpleDiffable; @@ -1553,7 +1554,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } @Override diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ElectionSchedulerFactoryTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ElectionSchedulerFactoryTests.java index 75d112c047ee0..76e04db308369 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ElectionSchedulerFactoryTests.java +++ 
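The expectedChunkCount helper introduced in ClusterStateTests above (and mirrored in MetadataTests further down) bottoms out in the same generic fallback: when a custom section has no statically known shape, drain its chunked-xcontent iterator and count the fragments. Isolated as a sketch, assuming the Elasticsearch test classpath:

import java.util.Iterator;

import org.elasticsearch.common.xcontent.ChunkedToXContent;
import org.elasticsearch.xcontent.ToXContent;

class ChunkCountSketch {
    // Count how many ToXContent fragments a chunked object emits for the given params.
    static int countChunks(ChunkedToXContent chunked, ToXContent.Params params) {
        int chunks = 0;
        for (Iterator<? extends ToXContent> it = chunked.toXContentChunked(params); it.hasNext(); it.next()) {
            chunks += 1;
        }
        return chunks;
    }
}

This is exactly what the rewritten writeChunks assertion does, except that writeChunks also serializes each fragment into the builder before counting it.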
b/server/src/test/java/org/elasticsearch/cluster/coordination/ElectionSchedulerFactoryTests.java @@ -223,6 +223,8 @@ public void testSettingsValidation() { assertThat(e.getMessage(), is("failed to parse value [601s] for setting [cluster.election.max_timeout], must be <= [600s]")); } + final var threadPool = new DeterministicTaskQueue().getThreadPool(); + { final long initialTimeoutMillis = randomLongBetween(1, 10000); final long backOffMillis = randomLongBetween(1, 60000); @@ -238,7 +240,7 @@ public void testSettingsValidation() { assertThat(ELECTION_BACK_OFF_TIME_SETTING.get(settings), is(TimeValue.timeValueMillis(backOffMillis))); assertThat(ELECTION_MAX_TIMEOUT_SETTING.get(settings), is(TimeValue.timeValueMillis(maxTimeoutMillis))); - assertThat(new ElectionSchedulerFactory(settings, random(), null), not(nullValue())); // doesn't throw an IAE + assertThat(new ElectionSchedulerFactory(settings, random(), threadPool), not(nullValue())); // doesn't throw an IAE } { @@ -252,7 +254,7 @@ public void testSettingsValidation() { IllegalArgumentException e = expectThrows( IllegalArgumentException.class, - () -> new ElectionSchedulerFactory(settings, random(), null) + () -> new ElectionSchedulerFactory(settings, random(), threadPool) ); assertThat( e.getMessage(), diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java index 968b9cc5558c1..db996ec397716 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/JoinHelperTests.java @@ -10,13 +10,13 @@ import org.apache.logging.log4j.Level; import org.elasticsearch.Build; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.TransportVersion; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.NotMasterException; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.service.MasterService; +import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; @@ -90,7 +90,8 @@ public void testJoinDeduplication() { new JoinReasonService(() -> 0L), new NoneCircuitBreakerService(), Function.identity(), - (listener, term) -> listener.onResponse(null) + (listener, term) -> listener.onResponse(null), + CompatibilityVersionsUtils.staticCurrent() ); transportService.start(); @@ -256,7 +257,8 @@ public void testJoinFailureOnUnhealthyNodes() { new JoinReasonService(() -> 0L), new NoneCircuitBreakerService(), Function.identity(), - (listener, term) -> listener.onResponse(null) + (listener, term) -> listener.onResponse(null), + CompatibilityVersionsUtils.staticCurrent() ); transportService.start(); @@ -331,12 +333,13 @@ public void testLatestStoredStateFailure() { new JoinReasonService(() -> 0L), new NoneCircuitBreakerService(), Function.identity(), - (listener, term) -> listener.onFailure(new ElasticsearchException("simulated")) + (listener, term) -> listener.onFailure(new ElasticsearchException("simulated")), + CompatibilityVersionsUtils.staticCurrent() ); final var joinAccumulator = joinHelper.new CandidateJoinAccumulator(); final var joinListener = new 
PlainActionFuture(); - joinAccumulator.handleJoinRequest(localNode, TransportVersion.current(), joinListener); + joinAccumulator.handleJoinRequest(localNode, CompatibilityVersionsUtils.staticCurrent(), joinListener); assert joinListener.isDone() == false; final var mockAppender = new MockLogAppender(); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/MessagesTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/MessagesTests.java index 61bc7df4e20f7..676914ec0bed2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/MessagesTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/MessagesTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.EqualsHashCodeTestUtils; @@ -242,7 +243,7 @@ public void testJoinRequestEqualsHashCodeSerialization() { ); JoinRequest initialJoinRequest = new JoinRequest( initialJoin.getSourceNode(), - TransportVersionUtils.randomVersion(), + new CompatibilityVersions(TransportVersionUtils.randomVersion()), randomNonNegativeLong(), randomBoolean() ? Optional.empty() : Optional.of(initialJoin) ); @@ -254,21 +255,23 @@ public void testJoinRequestEqualsHashCodeSerialization() { if (randomBoolean() && joinRequest.getOptionalJoin().isPresent() == false) { return new JoinRequest( createNode(randomAlphaOfLength(10)), - joinRequest.getTransportVersion(), + joinRequest.getCompatibilityVersions(), joinRequest.getMinimumTerm(), joinRequest.getOptionalJoin() ); } else if (randomBoolean()) { return new JoinRequest( joinRequest.getSourceNode(), - TransportVersionUtils.randomVersion(Set.of(joinRequest.getTransportVersion())), + new CompatibilityVersions( + TransportVersionUtils.randomVersion(Set.of(joinRequest.getCompatibilityVersions().transportVersion())) + ), joinRequest.getMinimumTerm(), joinRequest.getOptionalJoin() ); } else if (randomBoolean()) { return new JoinRequest( joinRequest.getSourceNode(), - joinRequest.getTransportVersion(), + joinRequest.getCompatibilityVersions(), randomValueOtherThan(joinRequest.getMinimumTerm(), ESTestCase::randomNonNegativeLong), joinRequest.getOptionalJoin() ); @@ -290,7 +293,7 @@ public void testJoinRequestEqualsHashCodeSerialization() { } return new JoinRequest( joinRequest.getSourceNode(), - joinRequest.getTransportVersion(), + joinRequest.getCompatibilityVersions(), joinRequest.getMinimumTerm(), newOptionalJoin ); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java index ad89288efd46a..4807d5ee984ca 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinExecutorTests.java @@ -28,6 +28,8 @@ import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterStateTaskExecutorUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; +import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.Priority; import 
org.elasticsearch.common.ReferenceDocs; import org.elasticsearch.common.UUIDs; @@ -57,6 +59,7 @@ import static org.elasticsearch.test.VersionUtils.randomVersionBetween; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; @@ -159,21 +162,24 @@ public void testPreventJoinClusterWithUnsupportedTransportVersion() { .mapToObj(i -> TransportVersionUtils.randomCompatibleVersion(random())) .toList(); TransportVersion min = Collections.min(versions); + List compatibilityVersions = versions.stream().map(CompatibilityVersions::new).toList(); // should not throw NodeJoinExecutor.ensureTransportVersionBarrier( - TransportVersionUtils.randomVersionBetween(random(), min, TransportVersion.current()), - versions + new CompatibilityVersions(TransportVersionUtils.randomVersionBetween(random(), min, TransportVersion.current())), + compatibilityVersions ); expectThrows( IllegalStateException.class, () -> NodeJoinExecutor.ensureTransportVersionBarrier( - TransportVersionUtils.randomVersionBetween( - random(), - TransportVersionUtils.getFirstVersion(), - TransportVersionUtils.getPreviousVersion(min) + new CompatibilityVersions( + TransportVersionUtils.randomVersionBetween( + random(), + TransportVersionUtils.getFirstVersion(), + TransportVersionUtils.getPreviousVersion(min) + ) ), - versions + compatibilityVersions ) ); } @@ -249,7 +255,7 @@ public void testUpdatesNodeWithNewRoles() throws Exception { final var resultingState = ClusterStateTaskExecutorUtils.executeAndAssertSuccessful( clusterState, executor, - List.of(JoinTask.singleNode(actualNode, TransportVersion.current(), TEST_REASON, NOT_COMPLETED_LISTENER, 0L)) + List.of(JoinTask.singleNode(actualNode, CompatibilityVersionsUtils.staticCurrent(), TEST_REASON, NOT_COMPLETED_LISTENER, 0L)) ); assertThat(resultingState.getNodes().get(actualNode.getId()).getRoles(), equalTo(actualNode.getRoles())); @@ -280,12 +286,23 @@ public void testRejectsStatesWithStaleTerm() { executor, randomBoolean() ? List.of( - JoinTask.singleNode(masterNode, TransportVersion.current(), TEST_REASON, NOT_COMPLETED_LISTENER, executorTerm) + JoinTask.singleNode( + masterNode, + CompatibilityVersionsUtils.staticCurrent(), + TEST_REASON, + NOT_COMPLETED_LISTENER, + executorTerm + ) ) : List.of( JoinTask.completingElection( Stream.of( - new JoinTask.NodeJoinTask(masterNode, TransportVersion.current(), TEST_REASON, NOT_COMPLETED_LISTENER) + new JoinTask.NodeJoinTask( + masterNode, + CompatibilityVersionsUtils.staticCurrent(), + TEST_REASON, + NOT_COMPLETED_LISTENER + ) ), executorTerm ) @@ -331,12 +348,23 @@ public void testRejectsStatesWithOtherMaster() { executor, randomBoolean() ? 
List.of( - JoinTask.singleNode(masterNode, TransportVersion.current(), TEST_REASON, NOT_COMPLETED_LISTENER, executorTerm) + JoinTask.singleNode( + masterNode, + CompatibilityVersionsUtils.staticCurrent(), + TEST_REASON, + NOT_COMPLETED_LISTENER, + executorTerm + ) ) : List.of( JoinTask.completingElection( Stream.of( - new JoinTask.NodeJoinTask(masterNode, TransportVersion.current(), TEST_REASON, NOT_COMPLETED_LISTENER) + new JoinTask.NodeJoinTask( + masterNode, + CompatibilityVersionsUtils.staticCurrent(), + TEST_REASON, + NOT_COMPLETED_LISTENER + ) ), executorTerm ) @@ -372,7 +400,15 @@ public void testRejectsStatesWithNoMasterIfNotBecomingMaster() { () -> ClusterStateTaskExecutorUtils.executeHandlingResults( clusterState, executor, - List.of(JoinTask.singleNode(masterNode, TransportVersion.current(), TEST_REASON, NOT_COMPLETED_LISTENER, executorTerm)), + List.of( + JoinTask.singleNode( + masterNode, + CompatibilityVersionsUtils.staticCurrent(), + TEST_REASON, + NOT_COMPLETED_LISTENER, + executorTerm + ) + ), t -> fail("should not succeed"), (t, e) -> assertThat(e, instanceOf(NotMasterException.class)) ) @@ -416,8 +452,18 @@ public void testRemovesOlderNodeInstancesWhenBecomingMaster() throws Exception { List.of( JoinTask.completingElection( Stream.of( - new JoinTask.NodeJoinTask(masterNode, TransportVersion.current(), TEST_REASON, NOT_COMPLETED_LISTENER), - new JoinTask.NodeJoinTask(otherNodeNew, TransportVersion.current(), TEST_REASON, NOT_COMPLETED_LISTENER) + new JoinTask.NodeJoinTask( + masterNode, + CompatibilityVersionsUtils.staticCurrent(), + TEST_REASON, + NOT_COMPLETED_LISTENER + ), + new JoinTask.NodeJoinTask( + otherNodeNew, + CompatibilityVersionsUtils.staticCurrent(), + TEST_REASON, + NOT_COMPLETED_LISTENER + ) ), executorTerm ) @@ -438,8 +484,20 @@ public void testRemovesOlderNodeInstancesWhenBecomingMaster() throws Exception { afterElectionClusterState, executor, List.of( - JoinTask.singleNode(masterNode, TransportVersion.current(), TEST_REASON, NOT_COMPLETED_LISTENER, executorTerm), - JoinTask.singleNode(otherNodeOld, TransportVersion.current(), TEST_REASON, NOT_COMPLETED_LISTENER, executorTerm) + JoinTask.singleNode( + masterNode, + CompatibilityVersionsUtils.staticCurrent(), + TEST_REASON, + NOT_COMPLETED_LISTENER, + executorTerm + ), + JoinTask.singleNode( + otherNodeOld, + CompatibilityVersionsUtils.staticCurrent(), + TEST_REASON, + NOT_COMPLETED_LISTENER, + executorTerm + ) ) ).nodes().get(otherNodeNew.getId()).getEphemeralId(), equalTo(otherNodeNew.getEphemeralId()) @@ -489,8 +547,18 @@ public void testUpdatesVotingConfigExclusionsIfNeeded() throws Exception { List.of( JoinTask.completingElection( Stream.of( - new JoinTask.NodeJoinTask(masterNode, TransportVersion.current(), TEST_REASON, NOT_COMPLETED_LISTENER), - new JoinTask.NodeJoinTask(otherNode, TransportVersion.current(), TEST_REASON, NOT_COMPLETED_LISTENER) + new JoinTask.NodeJoinTask( + masterNode, + CompatibilityVersionsUtils.staticCurrent(), + TEST_REASON, + NOT_COMPLETED_LISTENER + ), + new JoinTask.NodeJoinTask( + otherNode, + CompatibilityVersionsUtils.staticCurrent(), + TEST_REASON, + NOT_COMPLETED_LISTENER + ) ), executorTerm ) @@ -502,7 +570,14 @@ public void testUpdatesVotingConfigExclusionsIfNeeded() throws Exception { executor, List.of( JoinTask.completingElection( - Stream.of(new JoinTask.NodeJoinTask(masterNode, TransportVersion.current(), TEST_REASON, NOT_COMPLETED_LISTENER)), + Stream.of( + new JoinTask.NodeJoinTask( + masterNode, + CompatibilityVersionsUtils.staticCurrent(), + TEST_REASON, + 
NOT_COMPLETED_LISTENER + ) + ), executorTerm ) ) @@ -510,7 +585,15 @@ public void testUpdatesVotingConfigExclusionsIfNeeded() throws Exception { clusterState = ClusterStateTaskExecutorUtils.executeAndAssertSuccessful( clusterState, executor, - List.of(JoinTask.singleNode(otherNode, TransportVersion.current(), TEST_REASON, NOT_COMPLETED_LISTENER, executorTerm)) + List.of( + JoinTask.singleNode( + otherNode, + CompatibilityVersionsUtils.staticCurrent(), + TEST_REASON, + NOT_COMPLETED_LISTENER, + executorTerm + ) + ) ); } @@ -575,7 +658,7 @@ public void testDesiredNodesMembershipIsUpgradedWhenNewNodesJoin() throws Except final var desiredNodes = DesiredNodes.latestFromClusterState(clusterState); var tasks = joiningNodes.stream() - .map(node -> JoinTask.singleNode(node, TransportVersion.current(), TEST_REASON, NOT_COMPLETED_LISTENER, 0L)) + .map(node -> JoinTask.singleNode(node, CompatibilityVersionsUtils.staticCurrent(), TEST_REASON, NOT_COMPLETED_LISTENER, 0L)) .toList(); final var updatedClusterState = ClusterStateTaskExecutorUtils.executeAndAssertSuccessful(clusterState, executor, tasks); @@ -611,7 +694,9 @@ public void testDesiredNodesMembershipIsUpgradedWhenANewMasterIsElected() throws final var completingElectionTask = JoinTask.completingElection( clusterState.nodes() .stream() - .map(node -> new JoinTask.NodeJoinTask(node, TransportVersion.current(), TEST_REASON, NOT_COMPLETED_LISTENER)), + .map( + node -> new JoinTask.NodeJoinTask(node, CompatibilityVersionsUtils.staticCurrent(), TEST_REASON, NOT_COMPLETED_LISTENER) + ), 1L ); @@ -663,7 +748,11 @@ public void testPerNodeLogging() { PlainActionFuture.get( future -> clusterService.getMasterService() .createTaskQueue("test", Priority.NORMAL, executor) - .submitTask("test", JoinTask.singleNode(node1, TransportVersion.current(), TEST_REASON, future, 0L), null), + .submitTask( + "test", + JoinTask.singleNode(node1, CompatibilityVersionsUtils.staticCurrent(), TEST_REASON, future, 0L), + null + ), 10, TimeUnit.SECONDS ) @@ -688,7 +777,11 @@ public void testPerNodeLogging() { PlainActionFuture.get( future -> clusterService.getMasterService() .createTaskQueue("test", Priority.NORMAL, executor) - .submitTask("test", JoinTask.singleNode(node2, TransportVersion.current(), testReasonWithLink, future, 0L), null), + .submitTask( + "test", + JoinTask.singleNode(node2, CompatibilityVersionsUtils.staticCurrent(), testReasonWithLink, future, 0L), + null + ), 10, TimeUnit.SECONDS ) @@ -699,6 +792,46 @@ public void testPerNodeLogging() { } } + public void testResetsNodeLeftGenerationOnNewTerm() throws Exception { + final AllocationService allocationService = createAllocationService(); + when(allocationService.adaptAutoExpandReplicas(any())).then(invocationOnMock -> invocationOnMock.getArguments()[0]); + final RerouteService rerouteService = (reason, priority, listener) -> listener.onResponse(null); + + final NodeJoinExecutor executor = new NodeJoinExecutor(allocationService, rerouteService); + + final long term = randomLongBetween(0, Long.MAX_VALUE - 1); + final DiscoveryNode masterNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + final DiscoveryNode otherNode = DiscoveryNodeUtils.create(UUIDs.base64UUID()); + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .metadata(Metadata.builder().coordinationMetadata(CoordinationMetadata.builder().term(term).build())) + .nodes(DiscoveryNodes.builder().add(masterNode).localNodeId(masterNode.getId()).add(otherNode).remove(otherNode)) + .build(); + + assertEquals(term, 
clusterState.term()); + assertEquals(1L, clusterState.nodes().getNodeLeftGeneration()); + + final var resultingState = ClusterStateTaskExecutorUtils.executeAndAssertSuccessful( + clusterState, + executor, + List.of( + JoinTask.completingElection( + Stream.of( + new JoinTask.NodeJoinTask( + otherNode, + CompatibilityVersionsUtils.staticCurrent(), + TEST_REASON, + NOT_COMPLETED_LISTENER + ) + ), + randomLongBetween(term + 1, Long.MAX_VALUE) + ) + ) + ); + + assertThat(resultingState.term(), greaterThan(term)); + assertEquals(0L, resultingState.nodes().getNodeLeftGeneration()); + } + private DesiredNodeWithStatus createActualizedDesiredNode() { return new DesiredNodeWithStatus(randomDesiredNode(), DesiredNodeWithStatus.Status.ACTUALIZED); } @@ -709,9 +842,9 @@ private DesiredNodeWithStatus createPendingDesiredNode() { private static JoinTask createRandomTask(DiscoveryNode node, long term) { return randomBoolean() - ? JoinTask.singleNode(node, TransportVersion.current(), TEST_REASON, NOT_COMPLETED_LISTENER, term) + ? JoinTask.singleNode(node, CompatibilityVersionsUtils.staticCurrent(), TEST_REASON, NOT_COMPLETED_LISTENER, term) : JoinTask.completingElection( - Stream.of(new JoinTask.NodeJoinTask(node, TransportVersion.current(), TEST_REASON, NOT_COMPLETED_LISTENER)), + Stream.of(new JoinTask.NodeJoinTask(node, CompatibilityVersionsUtils.staticCurrent(), TEST_REASON, NOT_COMPLETED_LISTENER)), term ); } diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java index 3206cdccc1467..a068029667eb2 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeJoinTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.cluster.coordination; import org.elasticsearch.Build; -import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterName; @@ -24,6 +23,8 @@ import org.elasticsearch.cluster.service.FakeThreadPoolMasterService; import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.cluster.service.MasterServiceTests; +import org.elasticsearch.cluster.version.CompatibilityVersions; +import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -36,7 +37,6 @@ import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.TransportVersionUtils; import org.elasticsearch.test.transport.CapturingTransport; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -229,7 +229,8 @@ protected void onSendRequest(long requestId, String action, TransportRequest req new NoneCircuitBreakerService(), new Reconfigurator(Settings.EMPTY, clusterSettings), LeaderHeartbeatService.NO_OP, - StatefulPreVoteCollector::new + StatefulPreVoteCollector::new, + CompatibilityVersionsUtils.staticCurrent() ); transportService.start(); transportService.acceptIncomingRequests(); @@ -305,7 +306,7 @@ private void joinNodeAndRun(final JoinRequest joinRequest) { public void testJoinWithHigherTermElectsLeader() { DiscoveryNode node0 = newNode(0, true); DiscoveryNode node1 = 
newNode(1, true); - TransportVersion version1 = TransportVersionUtils.randomVersion(); + CompatibilityVersions version1 = CompatibilityVersionsUtils.staticRandom(); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); setupFakeMasterServiceAndCoordinator( @@ -330,7 +331,7 @@ public void testJoinWithHigherTermElectsLeader() { public void testJoinWithHigherTermButBetterStateGetsRejected() { DiscoveryNode node0 = newNode(0, true); DiscoveryNode node1 = newNode(1, true); - TransportVersion version1 = TransportVersionUtils.randomVersion(); + CompatibilityVersions version1 = CompatibilityVersionsUtils.staticRandom(); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); setupFakeMasterServiceAndCoordinator( @@ -366,7 +367,7 @@ public void testJoinWithHigherTermButBetterStateStillElectsMasterThroughSelfJoin joinNodeAndRun( new JoinRequest( node1, - TransportVersion.current(), + CompatibilityVersionsUtils.staticCurrent(), newTerm, Optional.of(new Join(node1, node0, newTerm, initialTerm, higherVersion)) ) @@ -389,7 +390,7 @@ public void testJoinElectedLeader() { joinNodeAndRun( new JoinRequest( node0, - TransportVersion.current(), + CompatibilityVersionsUtils.staticCurrent(), newTerm, Optional.of(new Join(node0, node0, newTerm, initialTerm, initialVersion)) ) @@ -399,7 +400,7 @@ public void testJoinElectedLeader() { joinNodeAndRun( new JoinRequest( node1, - TransportVersion.current(), + CompatibilityVersionsUtils.staticCurrent(), newTerm, Optional.of(new Join(node1, node0, newTerm, initialTerm, initialVersion)) ) @@ -423,7 +424,7 @@ public void testJoinElectedLeaderWithHigherTerm() { joinNodeAndRun( new JoinRequest( node0, - TransportVersion.current(), + CompatibilityVersionsUtils.staticCurrent(), newTerm, Optional.of(new Join(node0, node0, newTerm, initialTerm, initialVersion)) ) @@ -431,7 +432,7 @@ public void testJoinElectedLeaderWithHigherTerm() { assertTrue(isLocalNodeElectedMaster()); long newerTerm = newTerm + randomLongBetween(1, 10); - joinNodeAndRun(new JoinRequest(node1, TransportVersion.current(), newerTerm, Optional.empty())); + joinNodeAndRun(new JoinRequest(node1, CompatibilityVersionsUtils.staticCurrent(), newerTerm, Optional.empty())); assertThat(coordinator.getCurrentTerm(), greaterThanOrEqualTo(newerTerm)); assertTrue(isLocalNodeElectedMaster()); } @@ -452,7 +453,7 @@ public void testJoinAccumulation() { Future futNode0 = joinNodeAsync( new JoinRequest( node0, - TransportVersion.current(), + CompatibilityVersionsUtils.staticCurrent(), newTerm, Optional.of(new Join(node0, node0, newTerm, initialTerm, initialVersion)) ) @@ -463,7 +464,7 @@ public void testJoinAccumulation() { Future futNode1 = joinNodeAsync( new JoinRequest( node1, - TransportVersion.current(), + CompatibilityVersionsUtils.staticCurrent(), newTerm, Optional.of(new Join(node1, node0, newTerm, initialTerm, initialVersion)) ) @@ -474,7 +475,7 @@ public void testJoinAccumulation() { joinNodeAndRun( new JoinRequest( node2, - TransportVersion.current(), + CompatibilityVersionsUtils.staticCurrent(), newTerm, Optional.of(new Join(node2, node0, newTerm, initialTerm, initialVersion)) ) @@ -503,7 +504,7 @@ public void testJoinFollowerWithHigherTerm() throws Exception { joinNodeAndRun( new JoinRequest( node1, - TransportVersion.current(), + CompatibilityVersionsUtils.staticCurrent(), newerTerm, Optional.of(new Join(node1, node0, newerTerm, initialTerm, initialVersion)) ) @@ -537,7 +538,7 @@ public void testJoinUpdateVotingConfigExclusion() throws 
Exception { joinNodeAndRun( new JoinRequest( knownJoiningNode, - TransportVersion.current(), + CompatibilityVersionsUtils.staticCurrent(), initialTerm, Optional.of(new Join(knownJoiningNode, initialNode, newerTerm, initialTerm, initialVersion)) ) @@ -612,7 +613,7 @@ public void onFailure(Exception e) { public void testJoinFollowerFails() throws Exception { DiscoveryNode node0 = newNode(0, true); DiscoveryNode node1 = newNode(1, true); - TransportVersion version1 = TransportVersionUtils.randomVersion(); + CompatibilityVersions version1 = CompatibilityVersionsUtils.staticRandom(); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); setupFakeMasterServiceAndCoordinator( @@ -636,7 +637,7 @@ public void testJoinFollowerFails() throws Exception { public void testBecomeFollowerFailsPendingJoin() throws Exception { DiscoveryNode node0 = newNode(0, true); DiscoveryNode node1 = newNode(1, true); - TransportVersion version0 = TransportVersionUtils.randomVersion(); + CompatibilityVersions version0 = CompatibilityVersionsUtils.staticRandom(); long initialTerm = randomLongBetween(1, 10); long initialVersion = randomLongBetween(1, 10); setupFakeMasterServiceAndCoordinator( @@ -696,7 +697,7 @@ public void testConcurrentJoining() { .map( node -> new JoinRequest( node, - TransportVersion.current(), + CompatibilityVersionsUtils.staticCurrent(), newTerm, Optional.of(new Join(node, localNode, newTerm, initialTerm, initialVersion)) ) @@ -713,7 +714,7 @@ public void testConcurrentJoining() { // a correct request return new JoinRequest( node, - TransportVersion.current(), + CompatibilityVersionsUtils.staticCurrent(), newTerm, Optional.of(new Join(node, localNode, newTerm, initialTerm, initialVersion)) ); @@ -721,7 +722,7 @@ public void testConcurrentJoining() { // term too low return new JoinRequest( node, - TransportVersion.current(), + CompatibilityVersionsUtils.staticCurrent(), newTerm, Optional.of(new Join(node, localNode, randomLongBetween(0, initialTerm), initialTerm, initialVersion)) ); @@ -729,7 +730,7 @@ public void testConcurrentJoining() { // better state return new JoinRequest( node, - TransportVersion.current(), + CompatibilityVersionsUtils.staticCurrent(), newTerm, Optional.of(new Join(node, localNode, newTerm, initialTerm, initialVersion + randomLongBetween(1, 10))) ); diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeLeftExecutorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeLeftExecutorTests.java index 8d794786881da..4f03cbe3a1fc0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/NodeLeftExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/NodeLeftExecutorTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.cluster.coordination; import org.apache.logging.log4j.Level; -import org.elasticsearch.TransportVersion; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; @@ -18,6 +17,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterStateTaskExecutorUtils; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.common.Priority; import org.elasticsearch.test.ClusterServiceUtils; import org.elasticsearch.test.ESTestCase; @@ -72,9 +72,11 @@ public void testRerouteAfterRemovingNodes() throws Exception { 
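The join-path changes above all follow from one signature change: APIs that used to carry a bare TransportVersion now carry a CompatibilityVersions wrapper, and the NodeLeftExecutor override below switches its Map values accordingly. A minimal sketch of the wrapping and unwrapping, using only the calls visible in these hunks (CompatibilityVersionsUtils is the test helper shown in the imports):

import java.util.Optional;

import org.elasticsearch.TransportVersion;
import org.elasticsearch.cluster.coordination.JoinRequest;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.version.CompatibilityVersions;
import org.elasticsearch.cluster.version.CompatibilityVersionsUtils;

class JoinVersionsSketch {
    // Build a join request for a node: the versions argument is now the wrapper type.
    static JoinRequest joinRequestFor(DiscoveryNode node, long term) {
        CompatibilityVersions versions = CompatibilityVersionsUtils.staticCurrent(); // was TransportVersion.current()
        TransportVersion wire = versions.transportVersion(); // unwrap when only the wire version matters
        assert wire != null;
        return new JoinRequest(node, versions, term, Optional.empty());
    }
}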
protected ClusterState remainingNodesClusterState( ClusterState currentState, DiscoveryNodes.Builder remainingNodesBuilder, - Map<DiscoveryNode, TransportVersion> transportVersions + Map<DiscoveryNode, CompatibilityVersions> compatibilityVersions ) { - remainingNodesClusterState.set(super.remainingNodesClusterState(currentState, remainingNodesBuilder, transportVersions)); + remainingNodesClusterState.set( + super.remainingNodesClusterState(currentState, remainingNodesBuilder, compatibilityVersions) + ); return remainingNodesClusterState.get(); } };
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTransportHandlerTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTransportHandlerTests.java index 7e11d33a2a396..d266988f0123d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTransportHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/PublicationTransportHandlerTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.ClusterState; @@ -235,7 +236,7 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() { allNodes.add(node); nodeTransports.put( node, - TransportVersionUtils.randomVersionBetween(random(), TransportVersion.MINIMUM_COMPATIBLE, TransportVersion.current()) + TransportVersionUtils.randomVersionBetween(random(), TransportVersions.MINIMUM_COMPATIBLE, TransportVersion.current()) ); }
diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/CapturingThreadPool.java b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/CapturingThreadPool.java index 5ee2355b5a7db..44cc09cf26c83 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/CapturingThreadPool.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/stateless/CapturingThreadPool.java @@ -14,6 +14,7 @@ import java.util.ArrayDeque; import java.util.Deque; +import java.util.concurrent.Executor; class CapturingThreadPool extends TestThreadPool { final Deque<Tuple<TimeValue, Runnable>> scheduledTasks = new ArrayDeque<>(); @@ -23,7 +24,7 @@ class CapturingThreadPool extends TestThreadPool { } @Override - public ScheduledCancellable schedule(Runnable task, TimeValue delay, String executor) { + public ScheduledCancellable schedule(Runnable task, TimeValue delay, Executor executor) { scheduledTasks.add(new Tuple<>(delay, task)); return null; }
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java index 17bfabf509848..8b9317fe6caea 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/AutoExpandReplicasTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteRequest; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; @@ -237,7 +238,7 @@ public void testOnlyAutoExpandAllocationFilteringAfterAllNodesUpgraded() { localNode, localNode, allNodes.toArray(new DiscoveryNode[0]), - 
TransportVersions.V_7_0_0 ); CreateIndexRequest request = new CreateIndexRequest( @@ -259,7 +260,7 @@ public void testOnlyAutoExpandAllocationFilteringAfterAllNodesUpgraded() { // is the // master - state = cluster.addNode(state, newNode, TransportVersion.V_7_6_0); + state = cluster.addNode(state, newNode, TransportVersions.V_7_6_0); // use allocation filtering state = cluster.updateSettings( diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java index b9224c5227216..d10e44e3dd1fa 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DataStreamTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.cluster.metadata; import org.apache.lucene.tests.util.LuceneTestCase; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; import org.elasticsearch.action.admin.indices.rollover.RolloverConfiguration; import org.elasticsearch.action.admin.indices.rollover.RolloverConfigurationTests; @@ -770,7 +769,7 @@ public void testValidate() { Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()) .build() ) .build() @@ -789,7 +788,7 @@ public void testValidate() { Settings.builder() .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()) .put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) .put(IndexSettings.TIME_SERIES_START_TIME.getKey(), start3.toEpochMilli()) .put(IndexSettings.TIME_SERIES_END_TIME.getKey(), end3.toEpochMilli()) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java index fe877bac66b7e..393c39e336fed 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java @@ -19,12 +19,15 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.collect.Iterators; +import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.iterable.Iterables; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.Nullable; @@ -35,10 +38,13 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.alias.RandomAliasActionsGenerator; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.ingest.IngestMetadata; +import 
org.elasticsearch.persistent.PersistentTasksCustomMetadata;
 import org.elasticsearch.plugins.MapperPlugin;
 import org.elasticsearch.test.AbstractChunkedSerializingTestCase;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.test.index.IndexVersionUtils;
+import org.elasticsearch.upgrades.FeatureMigrationResults;
 import org.elasticsearch.xcontent.ToXContent;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xcontent.XContentParser;
@@ -60,6 +66,7 @@
 import java.util.Set;
 import java.util.SortedMap;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.Function;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
@@ -67,6 +74,8 @@
 import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.createFirstBackingIndex;
 import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.newInstance;
 import static org.elasticsearch.cluster.metadata.Metadata.Builder.assertDataStreams;
+import static org.elasticsearch.cluster.metadata.Metadata.CONTEXT_MODE_API;
+import static org.elasticsearch.cluster.metadata.Metadata.CONTEXT_MODE_PARAM;
 import static org.elasticsearch.test.LambdaMatchers.transformedItemsMatch;
 import static org.hamcrest.Matchers.aMapWithSize;
 import static org.hamcrest.Matchers.allOf;
@@ -2149,41 +2158,65 @@ public void testEmptyDiffReturnsSameInstance() throws IOException {
         assertSame(instance, deserializedDiff.apply(instance));
     }

-    public void testChunkedToXContent() throws IOException {
-        final int datastreams = randomInt(10);
+    public void testChunkedToXContent() {
+        AbstractChunkedSerializingTestCase.assertChunkCount(randomMetadata(randomInt(10)), MetadataTests::expectedChunkCount);
+    }
+
+    private static int expectedChunkCount(Metadata metadata) {
+        return expectedChunkCount(ToXContent.EMPTY_PARAMS, metadata);
+    }
+
+    public static int expectedChunkCount(ToXContent.Params params, Metadata metadata) {
+        final var context = Metadata.XContentContext.valueOf(params.param(CONTEXT_MODE_PARAM, CONTEXT_MODE_API));
+        // 2 chunks at the beginning
+        int chunkCount = 2;
+        // 1 optional chunk for persistent settings
+        if (context != Metadata.XContentContext.API && metadata.persistentSettings().isEmpty() == false) {
+            chunkCount += 1;
+        }
+        // 2 chunks wrapping templates and one chunk per template
+        chunkCount += 2 + metadata.templates().size();
+        // 1 chunk for each index + 2 to wrap the indices field
+        chunkCount += 2 + metadata.indices().size();
+
+        for (Metadata.Custom custom : metadata.customs().values()) {
+            chunkCount += 2;
+
+            if (custom instanceof ComponentTemplateMetadata componentTemplateMetadata) {
+                chunkCount += 2 + componentTemplateMetadata.componentTemplates().size();
+            } else if (custom instanceof ComposableIndexTemplateMetadata composableIndexTemplateMetadata) {
+                chunkCount += 2 + composableIndexTemplateMetadata.indexTemplates().size();
+            } else if (custom instanceof DataStreamMetadata dataStreamMetadata) {
+                chunkCount += 4 + dataStreamMetadata.dataStreams().size() + dataStreamMetadata.getDataStreamAliases().size();
+            } else if (custom instanceof DesiredNodesMetadata) {
+                chunkCount += 1;
+            } else if (custom instanceof FeatureMigrationResults featureMigrationResults) {
+                chunkCount += 2 + featureMigrationResults.getFeatureStatuses().size();
+            } else if (custom instanceof IndexGraveyard indexGraveyard) {
+                chunkCount += 2 + indexGraveyard.getTombstones().size();
+            } else if (custom instanceof IngestMetadata ingestMetadata) {
+                chunkCount += 2 + ingestMetadata.getPipelines().size();
+            } else if (custom instanceof NodesShutdownMetadata nodesShutdownMetadata) {
+                chunkCount += 2 + nodesShutdownMetadata.getAll().size();
+            } else if (custom instanceof PersistentTasksCustomMetadata persistentTasksCustomMetadata) {
+                chunkCount += 3 + persistentTasksCustomMetadata.tasks().size();
+            } else if (custom instanceof RepositoriesMetadata repositoriesMetadata) {
+                chunkCount += repositoriesMetadata.repositories().size();
+            } else {
+                // could be anything, we have to just try it
+                chunkCount += Iterables.size(
+                    (Iterable<ToXContent>) (() -> Iterators.map(custom.toXContentChunked(params), Function.identity()))
+                );
+            }
+        }
+
+        // 2 chunks for wrapping reserved state + 1 chunk for each item
-        // 2 chunks wrapping templates and one chunk per template
-        // 2 chunks to wrap each custom
-        // 1 chunk per datastream, 4 chunks to wrap ds and ds-aliases, or 0 if there are no datastreams
-        // 2 chunks to wrap index graveyard and one per tombstone
-        // 2 chunks to wrap component templates and one per component template
-        // 2 chunks to wrap v2 templates and one per v2 template
+        chunkCount += 2 + metadata.reservedStateMetadata().size();
         // 1 chunk to close metadata
-        AbstractChunkedSerializingTestCase.assertChunkCount(randomMetadata(datastreams), instance -> {
-            // 2 chunks at the beginning
-            // 1 chunk for each index + 2 to wrap the indices field
-            final int indicesChunks = instance.indices().size() + 2;
-            // 2 chunks for wrapping reserved state + 1 chunk for each item
-            final int reservedStateChunks = instance.reservedStateMetadata().size() + 2;
-            // 2 chunks wrapping templates and one chunk per template
-            final int templatesChunks = instance.templates().size() + 2;
-            // 2 chunks to wrap each custom
-            final int customChunks = 2 * instance.customs().size();
-            // 1 chunk per datastream, 4 chunks to wrap ds and ds-aliases, or 0 if there are no datastreams
-            final int dsChunks = datastreams == 0 ? 0 : (datastreams + 4);
-            // 2 chunks to wrap index graveyard and one per tombstone
-            final int graveYardChunks = instance.indexGraveyard().getTombstones().size() + 2;
-            // 2 chunks to wrap component templates and one per component template
-            final int componentTemplateChunks = instance.componentTemplates().size() + 2;
-            // 2 chunks to wrap v2 templates and one per v2 template
-            final int v2TemplateChunks = instance.templatesV2().size() + 2;
-            // 1 chunk to close metadata
-
-            return 2 + indicesChunks + reservedStateChunks + templatesChunks + customChunks + dsChunks + graveYardChunks
-                + componentTemplateChunks + v2TemplateChunks + 1;
-        });
+        chunkCount += 1;
+
+        return chunkCount;
     }

     /**
@@ -2240,6 +2273,40 @@ public void testEnsureMetadataFieldCheckedForGlobalStateChanges() {
         assertThat(unclassifiedFields, empty());
     }

+    public void testIsTimeSeriesTemplate() throws IOException {
+        var template = new Template(Settings.builder().put("index.mode", "time_series").build(), new CompressedXContent("{}"), null);
+        // Settings in component template:
+        {
+            var componentTemplate = new ComponentTemplate(template, null, null);
+            var indexTemplate = new ComposableIndexTemplate(
+                List.of("test-*"),
+                null,
+                List.of("component_template_1"),
+                null,
+                null,
+                null,
+                new ComposableIndexTemplate.DataStreamTemplate()
+            );
+            Metadata m = Metadata.builder().put("component_template_1", componentTemplate).put("index_template_1", indexTemplate).build();
+            assertThat(m.isTimeSeriesTemplate(indexTemplate), is(true));
+        }
+        // Settings in composable index template:
+        {
+            var componentTemplate = new ComponentTemplate(new Template(null, null, null), null, null);
+            var indexTemplate = new ComposableIndexTemplate(
+                List.of("test-*"),
+                template,
+                List.of("component_template_1"),
+                null,
+                null,
+                null,
+                new ComposableIndexTemplate.DataStreamTemplate()
+            );
+            Metadata m = Metadata.builder().put("component_template_1", componentTemplate).put("index_template_1", indexTemplate).build();
+            assertThat(m.isTimeSeriesTemplate(indexTemplate), is(true));
+        }
+    }
+
     public static Metadata randomMetadata() {
         return randomMetadata(1);
     }
diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsServiceTests.java
index f22b2a039e950..3c8cdb4bf799a 100644
--- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataUpdateSettingsServiceTests.java
@@ -8,13 +8,13 @@
 package org.elasticsearch.cluster.metadata;

-import org.elasticsearch.Version;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.settings.IndexScopedSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexModule;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.index.codec.CodecService;
 import org.elasticsearch.index.engine.EngineConfig;
 import org.elasticsearch.index.translog.Translog;
@@ -30,7 +30,7 @@ public class MetadataUpdateSettingsServiceTests extends ESTestCase {
     private final Settings metaSettings = Settings.builder()
         .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID())
         .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
-        .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT)
+
.put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()) .build(); private final IndexScopedSettings indexScopedSettings = new IndexScopedSettings( Settings.EMPTY, diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadataTests.java index 7198fcccab922..ab92ab7917f3b 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadataTests.java @@ -8,7 +8,7 @@ package org.elasticsearch.cluster.metadata; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; @@ -115,10 +115,10 @@ public void testSigtermIsRemoveInOlderVersions() throws IOException { .setGracePeriod(new TimeValue(1_000)) .build(); BytesStreamOutput out = new BytesStreamOutput(); - out.setTransportVersion(TransportVersion.V_8_7_1); + out.setTransportVersion(TransportVersions.V_8_7_1); metadata.writeTo(out); StreamInput in = out.bytes().streamInput(); - in.setTransportVersion(TransportVersion.V_8_7_1); + in.setTransportVersion(TransportVersions.V_8_7_1); assertThat(new SingleNodeShutdownMetadata(in).getType(), equalTo(SingleNodeShutdownMetadata.Type.REMOVE)); out = new BytesStreamOutput(); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java index 21df3c5b5020b..8bcd9201092d8 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; import org.elasticsearch.cluster.coordination.CoordinationMetadata; import org.elasticsearch.common.Strings; @@ -275,7 +274,7 @@ public void testToXContentGateway_FlatSettingTrue_ReduceMappingFalse() throws IO }, "reserved_state" : { } } - }""", Version.CURRENT.id, Version.CURRENT.id), Strings.toString(builder)); + }""", IndexVersion.current(), IndexVersion.current()), Strings.toString(builder)); } public void testToXContentAPI_SameTypeName() throws IOException { @@ -371,7 +370,7 @@ public void testToXContentAPI_SameTypeName() throws IOException { }, "reserved_state" : { } } - }""", Version.CURRENT.id), Strings.toString(builder)); + }""", IndexVersion.current()), Strings.toString(builder)); } public void testToXContentGateway_FlatSettingFalse_ReduceMappingTrue() throws IOException { @@ -436,7 +435,7 @@ public void testToXContentGateway_FlatSettingFalse_ReduceMappingTrue() throws IO }, "reserved_state" : { } } - }""", Version.CURRENT.id, Version.CURRENT.id), Strings.toString(builder)); + }""", IndexVersion.current(), IndexVersion.current()), Strings.toString(builder)); } public void testToXContentAPI_FlatSettingTrue_ReduceMappingFalse() throws IOException { @@ -539,7 +538,7 @@ public void testToXContentAPI_FlatSettingTrue_ReduceMappingFalse() throws IOExce }, "reserved_state" : { } } - }""", Version.CURRENT.id, Version.CURRENT.id), Strings.toString(builder)); + }""", IndexVersion.current(), 
IndexVersion.current()), Strings.toString(builder)); } public void testToXContentAPI_FlatSettingFalse_ReduceMappingTrue() throws IOException { @@ -648,7 +647,7 @@ public void testToXContentAPI_FlatSettingFalse_ReduceMappingTrue() throws IOExce }, "reserved_state" : { } } - }""", Version.CURRENT.id, Version.CURRENT.id), Strings.toString(builder)); + }""", IndexVersion.current(), IndexVersion.current()), Strings.toString(builder)); } public void testToXContentAPIReservedMetadata() throws IOException { @@ -828,7 +827,7 @@ public void testToXContentAPIReservedMetadata() throws IOException { } } } - }""", Version.CURRENT.id, Version.CURRENT.id), Strings.toString(builder)); + }""", IndexVersion.current(), IndexVersion.current()), Strings.toString(builder)); } private Metadata buildMetadata() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java index 7ae05025ce069..ab2e52c9b921c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/node/DiscoveryNodeTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.node; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -113,11 +114,11 @@ public void testDiscoveryNodeRoleWithOldVersion() throws Exception { { BytesStreamOutput streamOutput = new BytesStreamOutput(); - streamOutput.setTransportVersion(TransportVersion.V_7_11_0); + streamOutput.setTransportVersion(TransportVersions.V_7_11_0); node.writeTo(streamOutput); StreamInput in = StreamInput.wrap(streamOutput.bytes().toBytesRef().bytes); - in.setTransportVersion(TransportVersion.V_7_11_0); + in.setTransportVersion(TransportVersions.V_7_11_0); DiscoveryNode serialized = new DiscoveryNode(in); final Set roles = serialized.getRoles(); assertThat(roles, hasSize(1)); diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorServiceTests.java index 81294c867f545..e6da007b085a3 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ShardsAvailabilityHealthIndicatorServiceTests.java @@ -8,7 +8,6 @@ package org.elasticsearch.cluster.routing.allocation; -import org.elasticsearch.Version; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; @@ -1697,7 +1696,7 @@ private static List createIndexMetadataForIndexNameToPriorityMap( .put(IndexMetadata.SETTING_PRIORITY, indexNameToPriority.getValue()) .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1) .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1) - .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()) .build(); indexMetadataBuilder.settings(settings); indexMetadataList.add(indexMetadataBuilder.build()); diff --git 
a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java index 6326a18515f0c..c8e6a011bc52e 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java @@ -72,6 +72,8 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -1022,32 +1024,25 @@ public void testDoNotRebalanceToTheNodeThatNoLongerExists() { final var shardId = new ShardId(index, 0); final var clusterState = ClusterState.builder(ClusterName.DEFAULT) - .nodes( - DiscoveryNodes.builder() - // data-node-1 left the cluster - .localNodeId("data-node-2") - .masterNodeId("data-node-2") - .add(newNode("data-node-2")) - ) + .nodes(discoveryNodes(1))// node-1 left the cluster .metadata(Metadata.builder().put(indexMetadata, true)) .routingTable( - RoutingTable.builder() - .add(IndexRoutingTable.builder(index).addShard(newShardRouting(shardId, "data-node-2", true, STARTED))) + RoutingTable.builder().add(IndexRoutingTable.builder(index).addShard(newShardRouting(shardId, "node-0", true, STARTED))) ) .build(); final var allocation = createRoutingAllocationFrom(clusterState); final var balance = new DesiredBalance( 1, - Map.of(shardId, new ShardAssignment(Set.of("data-node-1"), 1, 0, 0)) // shard is assigned to the node that has left + Map.of(shardId, new ShardAssignment(Set.of("node-1"), 1, 0, 0)) // shard is assigned to the node that has left ); reconcile(allocation, balance); - assertThat(allocation.routingNodes().node("data-node-1"), nullValue()); - assertThat(allocation.routingNodes().node("data-node-2"), notNullValue()); + assertThat(allocation.routingNodes().node("node-0"), notNullValue()); + assertThat(allocation.routingNodes().node("node-1"), nullValue()); // shard is kept wherever until balance is recalculated - assertThat(allocation.routingNodes().node("data-node-2").getByShardId(shardId), notNullValue()); + assertThat(allocation.routingNodes().node("node-0").getByShardId(shardId), notNullValue()); } public void testDoNotAllocateIgnoredShards() { @@ -1057,7 +1052,7 @@ public void testDoNotAllocateIgnoredShards() { final var shardId = new ShardId(index, 0); final var clusterState = ClusterState.builder(ClusterName.DEFAULT) - .nodes(DiscoveryNodes.builder().localNodeId("node-1").masterNodeId("node-1").add(newNode("node-1"))) + .nodes(discoveryNodes(1)) .metadata(Metadata.builder().put(indexMetadata, true)) .routingTable(RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(indexMetadata)) .build(); @@ -1070,7 +1065,103 @@ public void testDoNotAllocateIgnoredShards() { reconcile(allocation, balance); + assertThat(allocation.routingNodes().node("node-0").size(), equalTo(0)); + assertThat(allocation.routingNodes().unassigned().ignored(), hasSize(1)); + } + + public void testFallbackAllocation() { + + final var indexMetadata = IndexMetadata.builder("index-1").settings(indexSettings(IndexVersion.current(), 1, 1)).build(); + final var index = indexMetadata.getIndex(); + final var shardId = new ShardId(index, 0); + + final var clusterState = 
ClusterState.builder(ClusterName.DEFAULT)
+            .nodes(discoveryNodes(4))
+            .metadata(Metadata.builder().put(indexMetadata, true))
+            .routingTable(RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(indexMetadata))
+            .build();
+
+        final Set<String> desiredNodeIds = Set.of("node-0", "node-1");
+        final var initialForcedAllocationDecider = new AllocationDecider() {
+            @Override
+            public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
+                // allocation on desired nodes is temporarily not possible
+                return desiredNodeIds.contains(node.nodeId()) ? Decision.NO : Decision.YES;
+            }
+        };
+
+        final var allocation = createRoutingAllocationFrom(clusterState, initialForcedAllocationDecider);
+        final var balance = new DesiredBalance(1, Map.of(shardId, new ShardAssignment(desiredNodeIds, 2, 0, 0)));
+
+        reconcile(allocation, balance);
+
+        // only primary is allocated to the fallback node, replica stays unassigned
+        assertThat(allocation.routingNodes().node("node-0").size() + allocation.routingNodes().node("node-1").size(), equalTo(0));
+        assertThat(allocation.routingNodes().node("node-2").size() + allocation.routingNodes().node("node-3").size(), equalTo(1));
+        assertThat(allocation.routingNodes().unassigned().ignored(), hasSize(1));
+    }
+
+    public void testForcedInitialAllocation() {
+
+        final var indexMetadata = IndexMetadata.builder("index-1").settings(indexSettings(IndexVersion.current(), 1, 0)).build();
+        final var index = indexMetadata.getIndex();
+        final var shardId = new ShardId(index, 0);
+
+        final var clusterState = ClusterState.builder(ClusterName.DEFAULT)
+            .nodes(discoveryNodes(2))
+            .metadata(Metadata.builder().put(indexMetadata, true))
+            .routingTable(RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(indexMetadata))
+            .build();
+
+        final var allocationIsNotPossibleOnDesiredNodeDesiredNode = new AllocationDecider() {
+            @Override
+            public Optional<Set<String>> getForcedInitialShardAllocationToNodes(ShardRouting shardRouting, RoutingAllocation allocation) {
+                return Optional.of(Set.of("node-1")); // intentionally different from the desired balance
+            }
+        };
+
+        final var allocation = createRoutingAllocationFrom(clusterState, allocationIsNotPossibleOnDesiredNodeDesiredNode);
+        final var balance = new DesiredBalance(1, Map.of(shardId, new ShardAssignment(Set.of("node-0"), 1, 0, 0)));
+
+        reconcile(allocation, balance);
+
+        assertThat(allocation.routingNodes().node("node-0").size(), equalTo(0));
+        assertThat(allocation.routingNodes().node("node-1").size(), equalTo(1));
+        assertThat(allocation.routingNodes().unassigned().ignored(), hasSize(0));
+    }
+
+    public void testForcedInitialAllocationDoNotFallback() {
+
+        final var indexMetadata = IndexMetadata.builder("index-1").settings(indexSettings(IndexVersion.current(), 1, 0)).build();
+        final var index = indexMetadata.getIndex();
+        final var shardId = new ShardId(index, 0);
+
+        final var clusterState = ClusterState.builder(ClusterName.DEFAULT)
+            .nodes(discoveryNodes(3))
+            .metadata(Metadata.builder().put(indexMetadata, true))
+            .routingTable(RoutingTable.builder(TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY).addAsNew(indexMetadata))
+            .build();
+
+        final var initialForcedAllocationDecider = new AllocationDecider() {
+            @Override
+            public Optional<Set<String>> getForcedInitialShardAllocationToNodes(ShardRouting shardRouting, RoutingAllocation allocation) {
+                return Optional.of(Set.of("node-1")); // intentionally different from the desired balance
+            }
+
+            @Override
+            public Decision
canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) { + return Objects.equals(node.nodeId(), "node-2") ? Decision.YES : Decision.NO; // can allocate only on fallback node + } + }; + + final var allocation = createRoutingAllocationFrom(clusterState, initialForcedAllocationDecider); + final var balance = new DesiredBalance(1, Map.of(shardId, new ShardAssignment(Set.of("node-0"), 1, 0, 0))); + + reconcile(allocation, balance); + + assertThat(allocation.routingNodes().node("node-0").size(), equalTo(0)); assertThat(allocation.routingNodes().node("node-1").size(), equalTo(0)); + assertThat(allocation.routingNodes().node("node-2").size(), equalTo(0)); assertThat(allocation.routingNodes().unassigned().ignored(), hasSize(1)); } diff --git a/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java index da3ddec7a025f..0f375246c3337 100644 --- a/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.serialization; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.ClusterModule; @@ -163,7 +164,7 @@ public void testSnapshotDeletionsInProgressSerialization() throws Exception { BytesStreamOutput outStream = new BytesStreamOutput(); TransportVersion version = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.MINIMUM_COMPATIBLE, + TransportVersions.MINIMUM_COMPATIBLE, TransportVersion.current() ); outStream.setTransportVersion(version); @@ -360,7 +361,7 @@ public static NamedDiff readDiffFrom(StreamInput in) throws IOException @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.MINIMUM_COMPATIBLE; + return TransportVersions.MINIMUM_COMPATIBLE; } } @@ -399,7 +400,7 @@ public void testCustomSerialization() throws Exception { // serialize with minimum compatibile version outStream = new BytesStreamOutput(); - version = TransportVersion.MINIMUM_COMPATIBLE; + version = TransportVersions.MINIMUM_COMPATIBLE; outStream.setTransportVersion(version); diffs.writeTo(outStream); inStream = outStream.bytes().streamInput(); diff --git a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java b/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java index b43aa3ac23d6d..3d488b6d55bff 100644 --- a/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/service/TransportVersionsFixupListenerTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.cluster.service; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; @@ -21,7 +22,9 @@ import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.TransportVersionsFixupListener.NodeTransportVersionTask; +import org.elasticsearch.cluster.version.CompatibilityVersions; 
import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.Maps; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.Scheduler; import org.mockito.ArgumentCaptor; @@ -29,12 +32,14 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.Executor; import static org.elasticsearch.test.LambdaMatchers.transformedMatch; import static org.hamcrest.Matchers.arrayContainingInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -107,10 +112,10 @@ public void testNothingFixedWhenNothingToInfer() { ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(Version.V_8_8_0)) - .transportVersions(versions(TransportVersion.V_8_8_0)) + .compatibilityVersions(versions(new CompatibilityVersions(TransportVersions.V_8_8_0))) .build(); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null); + TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null, null); listeners.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); verify(taskQueue, never()).submitTask(anyString(), any(), any()); @@ -122,10 +127,10 @@ public void testNothingFixedWhenOnNextVersion() { ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION)) - .transportVersions(versions(NEXT_TRANSPORT_VERSION)) + .compatibilityVersions(versions(new CompatibilityVersions(NEXT_TRANSPORT_VERSION))) .build(); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null); + TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null, null); listeners.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); verify(taskQueue, never()).submitTask(anyString(), any(), any()); @@ -137,10 +142,12 @@ public void testNothingFixedWhenOnPreviousVersion() { ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(Version.V_8_7_0, Version.V_8_8_0)) - .transportVersions(versions(TransportVersion.V_8_7_0, TransportVersion.V_8_8_0)) + .compatibilityVersions( + Maps.transformValues(versions(TransportVersions.V_8_7_0, TransportVersions.V_8_8_0), CompatibilityVersions::new) + ) .build(); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null); + TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null, null); listeners.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); verify(taskQueue, never()).submitTask(anyString(), any(), any()); @@ -153,13 +160,18 @@ public void testVersionsAreFixed() { ClusterState testState = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) - .transportVersions(versions(NEXT_TRANSPORT_VERSION, TransportVersion.V_8_8_0, TransportVersion.V_8_8_0)) + .compatibilityVersions( + Maps.transformValues( + versions(NEXT_TRANSPORT_VERSION, TransportVersions.V_8_8_0, TransportVersions.V_8_8_0), + CompatibilityVersions::new + ) + ) .build(); ArgumentCaptor> action = 
ArgumentCaptor.forClass(ActionListener.class); ArgumentCaptor task = ArgumentCaptor.forClass(NodeTransportVersionTask.class); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null); + TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null, null); listeners.clusterChanged(new ClusterChangedEvent("test", testState, ClusterState.EMPTY_STATE)); verify(client).nodesInfo( argThat(transformedMatch(NodesInfoRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), @@ -177,17 +189,27 @@ public void testConcurrentChangesDoNotOverlap() { ClusterState testState1 = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) - .transportVersions(versions(NEXT_TRANSPORT_VERSION, TransportVersion.V_8_8_0, TransportVersion.V_8_8_0)) + .compatibilityVersions( + Maps.transformValues( + versions(NEXT_TRANSPORT_VERSION, TransportVersions.V_8_8_0, TransportVersions.V_8_8_0), + CompatibilityVersions::new + ) + ) .build(); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null); + TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, null, null); listeners.clusterChanged(new ClusterChangedEvent("test", testState1, ClusterState.EMPTY_STATE)); verify(client).nodesInfo(argThat(transformedMatch(NodesInfoRequest::nodesIds, arrayContainingInAnyOrder("node1", "node2"))), any()); // don't send back the response yet ClusterState testState2 = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) - .transportVersions(versions(NEXT_TRANSPORT_VERSION, NEXT_TRANSPORT_VERSION, TransportVersion.V_8_8_0)) + .compatibilityVersions( + Maps.transformValues( + versions(NEXT_TRANSPORT_VERSION, NEXT_TRANSPORT_VERSION, TransportVersions.V_8_8_0), + CompatibilityVersions::new + ) + ) .build(); // should not send any requests listeners.clusterChanged(new ClusterChangedEvent("test", testState2, testState1)); @@ -199,21 +221,27 @@ public void testFailedRequestsAreRetried() { MasterServiceTaskQueue taskQueue = newMockTaskQueue(); ClusterAdminClient client = mock(ClusterAdminClient.class); Scheduler scheduler = mock(Scheduler.class); + Executor executor = mock(Executor.class); ClusterState testState1 = ClusterState.builder(ClusterState.EMPTY_STATE) .nodes(node(NEXT_VERSION, NEXT_VERSION, NEXT_VERSION)) - .transportVersions(versions(NEXT_TRANSPORT_VERSION, TransportVersion.V_8_8_0, TransportVersion.V_8_8_0)) + .compatibilityVersions( + Maps.transformValues( + versions(NEXT_TRANSPORT_VERSION, TransportVersions.V_8_8_0, TransportVersions.V_8_8_0), + CompatibilityVersions::new + ) + ) .build(); ArgumentCaptor> action = ArgumentCaptor.forClass(ActionListener.class); ArgumentCaptor retry = ArgumentCaptor.forClass(Runnable.class); - TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, scheduler); + TransportVersionsFixupListener listeners = new TransportVersionsFixupListener(taskQueue, client, scheduler, executor); listeners.clusterChanged(new ClusterChangedEvent("test", testState1, ClusterState.EMPTY_STATE)); verify(client, times(1)).nodesInfo(any(), action.capture()); // do response immediately action.getValue().onFailure(new RuntimeException("failure")); - verify(scheduler).schedule(retry.capture(), any(), any()); + verify(scheduler).schedule(retry.capture(), any(), same(executor)); // running retry should cause another 
check
        retry.getValue().run();
diff --git a/server/src/test/java/org/elasticsearch/cluster/version/CompatibilityVersionsTests.java b/server/src/test/java/org/elasticsearch/cluster/version/CompatibilityVersionsTests.java
new file mode 100644
index 0000000000000..0391cbf83608c
--- /dev/null
+++ b/server/src/test/java/org/elasticsearch/cluster/version/CompatibilityVersionsTests.java
@@ -0,0 +1,42 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0 and the Server Side Public License, v 1; you may not use this file except
+ * in compliance with, at your election, the Elastic License 2.0 or the Server
+ * Side Public License, v 1.
+ */
+
+package org.elasticsearch.cluster.version;
+
+import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.test.TransportVersionUtils;
+
+import java.util.Map;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class CompatibilityVersionsTests extends ESTestCase {
+
+    public void testMinimumVersions() {
+        assertThat(
+            CompatibilityVersions.minimumVersions(Map.of()),
+            equalTo(new CompatibilityVersions(TransportVersions.MINIMUM_COMPATIBLE))
+        );
+
+        TransportVersion version1 = TransportVersionUtils.getNextVersion(TransportVersions.MINIMUM_COMPATIBLE, true);
+        TransportVersion version2 = TransportVersionUtils.randomVersionBetween(
+            random(),
+            TransportVersionUtils.getNextVersion(version1, true),
+            TransportVersion.current()
+        );
+
+        CompatibilityVersions compatibilityVersions1 = new CompatibilityVersions(version1);
+        CompatibilityVersions compatibilityVersions2 = new CompatibilityVersions(version2);
+
+        Map<String, CompatibilityVersions> versionsMap = Map.of("node1", compatibilityVersions1, "node2", compatibilityVersions2);
+
+        assertThat(CompatibilityVersions.minimumVersions(versionsMap), equalTo(compatibilityVersions1));
+    }
+}
diff --git a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java
index 3f9a3562008a2..341ebea2a2a0c 100644
--- a/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java
+++ b/server/src/test/java/org/elasticsearch/common/collect/IteratorsTests.java
@@ -17,7 +17,10 @@
 import java.util.Iterator;
 import java.util.List;
 import java.util.NoSuchElementException;
+import java.util.Objects;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.function.BiPredicate;
+import java.util.function.ToIntFunction;
 import java.util.stream.IntStream;

 public class IteratorsTests extends ESTestCase {
@@ -208,6 +211,48 @@ public void testMap() {
         assertEquals(array.length, index.get());
     }

+    public void testEquals() {
+        final BiPredicate notCalled = (a, b) -> { throw new AssertionError("not called"); };
+
+        assertTrue(Iterators.equals(null, null, notCalled));
+        assertFalse(Iterators.equals(Collections.emptyIterator(), null, notCalled));
+        assertFalse(Iterators.equals(null, Collections.emptyIterator(), notCalled));
+        assertTrue(Iterators.equals(Collections.emptyIterator(), Collections.emptyIterator(), notCalled));
+
+        assertFalse(Iterators.equals(Collections.emptyIterator(), List.of(1).iterator(), notCalled));
+        assertFalse(Iterators.equals(List.of(1).iterator(), Collections.emptyIterator(), notCalled));
+        assertTrue(Iterators.equals(List.of(1).iterator(), List.of(1).iterator(), Objects::equals));
+
assertFalse(Iterators.equals(List.of(1).iterator(), List.of(2).iterator(), Objects::equals)); + assertFalse(Iterators.equals(List.of(1, 2).iterator(), List.of(1).iterator(), Objects::equals)); + assertFalse(Iterators.equals(List.of(1).iterator(), List.of(1, 2).iterator(), Objects::equals)); + + final var strings1 = randomList(10, () -> randomAlphaOfLength(10)); + final var strings2 = new ArrayList<>(strings1); + + assertTrue(Iterators.equals(strings1.iterator(), strings2.iterator(), Objects::equals)); + + if (strings2.size() == 0 || randomBoolean()) { + strings2.add(randomAlphaOfLength(10)); + } else { + final var index = between(0, strings2.size() - 1); + if (randomBoolean()) { + strings2.remove(index); + } else { + strings2.set(index, randomValueOtherThan(strings2.get(index), () -> randomAlphaOfLength(10))); + } + } + assertFalse(Iterators.equals(strings1.iterator(), strings2.iterator(), Objects::equals)); + } + + public void testHashCode() { + final ToIntFunction notCalled = (a) -> { throw new AssertionError("not called"); }; + assertEquals(0, Iterators.hashCode(null, notCalled)); + assertEquals(1, Iterators.hashCode(Collections.emptyIterator(), notCalled)); + + final var numbers = randomIntegerArray(); + assertEquals(Arrays.hashCode(numbers), Iterators.hashCode(Arrays.stream(numbers).iterator(), Objects::hashCode)); + } + private static Integer[] randomIntegerArray() { return Randomness.get().ints(randomIntBetween(0, 1000)).boxed().toArray(Integer[]::new); } diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java index 9b79aed0f3e52..1a6f52fabbd1b 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/AbstractStreamTests.java @@ -368,32 +368,42 @@ public int hashCode() { runWriteReadCollectionTest( () -> new FooBar(randomInt(), randomInt()), StreamOutput::writeCollection, - in -> in.readList(FooBar::new) + in -> in.readCollectionAsList(FooBar::new) ); runWriteReadCollectionTest( () -> new FooBar(randomInt(), randomInt()), StreamOutput::writeOptionalCollection, - in -> in.readOptionalList(FooBar::new) + in -> in.readOptionalCollectionAsList(FooBar::new) ); - runWriteReadOptionalCollectionWithNullInput(out -> out.writeOptionalCollection(null), in -> in.readOptionalList(FooBar::new)); + runWriteReadOptionalCollectionWithNullInput( + out -> out.writeOptionalCollection(null), + in -> in.readOptionalCollectionAsList(FooBar::new) + ); } public void testStringCollection() throws IOException { - runWriteReadCollectionTest(() -> randomUnicodeOfLength(16), StreamOutput::writeStringCollection, StreamInput::readStringList); + runWriteReadCollectionTest( + () -> randomUnicodeOfLength(16), + StreamOutput::writeStringCollection, + StreamInput::readStringCollectionAsList + ); } public void testOptionalStringCollection() throws IOException { runWriteReadCollectionTest( () -> randomUnicodeOfLength(16), StreamOutput::writeOptionalStringCollection, - StreamInput::readOptionalStringList + StreamInput::readOptionalStringCollectionAsList ); } public void testOptionalStringCollectionWithNullInput() throws IOException { - runWriteReadOptionalCollectionWithNullInput(out -> out.writeOptionalStringCollection(null), StreamInput::readOptionalStringList); + runWriteReadOptionalCollectionWithNullInput( + out -> out.writeOptionalStringCollection(null), + StreamInput::readOptionalStringCollectionAsList 
+ ); } private void runWriteReadCollectionTest( @@ -437,7 +447,7 @@ public void testSetOfLongs() throws IOException { final BytesStreamOutput out = new BytesStreamOutput(); out.writeCollection(sourceSet, StreamOutput::writeLong); - final Set targetSet = getStreamInput(out.bytes()).readSet(StreamInput::readLong); + final Set targetSet = getStreamInput(out.bytes()).readCollectionAsSet(StreamInput::readLong); assertThat(targetSet, equalTo(sourceSet)); } @@ -683,7 +693,7 @@ public void assertImmutableListSerialization(List expected, Writeable.Rea final BytesReference bytesReference = output.bytes(); final StreamInput input = getStreamInput(bytesReference); - List got = input.readImmutableList(reader); + List got = input.readCollectionAsImmutableList(reader); assertThat(got, equalTo(expected)); expectThrows(UnsupportedOperationException.class, got::clear); diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java index 740f72ce02bd6..92e8abbe83e16 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/BytesStreamsTests.java @@ -386,9 +386,9 @@ public void testNamedWriteableList() throws IOException { } try (BytesStreamOutput out = new BytesStreamOutput()) { - out.writeNamedWriteableList(expected); + out.writeNamedWriteableCollection(expected); try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), namedWriteableRegistry)) { - assertEquals(expected, in.readNamedWriteableList(BaseNamedWriteable.class)); + assertEquals(expected, in.readNamedWriteableCollectionAsList(BaseNamedWriteable.class)); assertEquals(0, in.available()); } } @@ -476,11 +476,11 @@ public void testWriteWriteableList() throws IOException { } final BytesStreamOutput out = new BytesStreamOutput(); - out.writeList(expected); + out.writeCollection(expected); final StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes())); - final List loaded = in.readList(TestWriteable::new); + final List loaded = in.readCollectionAsList(TestWriteable::new); assertThat(loaded, hasSize(expected.size())); @@ -542,48 +542,6 @@ public void testWriteImmutableMapOfWritable() throws IOException { assertThat(expected, equalTo(loaded)); } - public void testWriteMapOfLists() throws IOException { - final int size = randomIntBetween(0, 5); - final Map> expected = Maps.newMapWithExpectedSize(size); - - for (int i = 0; i < size; ++i) { - int listSize = randomIntBetween(0, 5); - List list = new ArrayList<>(listSize); - - for (int j = 0; j < listSize; ++j) { - list.add(randomAlphaOfLength(5)); - } - - expected.put(randomAlphaOfLength(2), list); - } - - final BytesStreamOutput out = new BytesStreamOutput(); - out.writeMapOfLists(expected, StreamOutput::writeString, StreamOutput::writeString); - - final StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes())); - - final Map> loaded = in.readMapOfLists(StreamInput::readString); - - assertThat(loaded.size(), equalTo(expected.size())); - - for (Map.Entry> entry : expected.entrySet()) { - assertThat(loaded.containsKey(entry.getKey()), equalTo(true)); - - List loadedList = loaded.get(entry.getKey()); - - assertThat(loadedList, hasSize(entry.getValue().size())); - - for (int i = 0; i < loadedList.size(); ++i) { - assertEquals(entry.getValue().get(i), loadedList.get(i)); - } - } - - assertEquals(0, in.available()); - - in.close(); - out.close(); - } - public 
void testWriteMapAsList() throws IOException { final int size = randomIntBetween(0, 100); final Map expected = Maps.newMapWithExpectedSize(size); diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/DelayableWriteableTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/DelayableWriteableTests.java index d5ee5fd779164..7e42653952a94 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/DelayableWriteableTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/DelayableWriteableTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.common.io.stream; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; @@ -169,7 +170,7 @@ private DelayableWriteable roundTrip( DelayableWriteable delayed = copyInstance( original, writableRegistry(), - (out, d) -> d.writeTo(out), + StreamOutput::writeWriteable, in -> DelayableWriteable.delayed(reader, in), version ); @@ -178,7 +179,7 @@ private DelayableWriteable roundTrip( DelayableWriteable referencing = copyInstance( original, writableRegistry(), - (out, d) -> d.writeTo(out), + StreamOutput::writeWriteable, in -> DelayableWriteable.referencing(reader, in), version ); @@ -195,7 +196,7 @@ protected NamedWriteableRegistry writableRegistry() { private static TransportVersion randomOldVersion() { return TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.MINIMUM_COMPATIBLE, + TransportVersions.MINIMUM_COMPATIBLE, TransportVersionUtils.getPreviousVersion(TransportVersion.current()) ); } diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutputTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutputTests.java index 63fe28d7fc37b..3077944490d5a 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutputTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/RecyclerBytesStreamOutputTests.java @@ -426,9 +426,9 @@ public void testNamedWriteableList() throws IOException { } try (RecyclerBytesStreamOutput out = new RecyclerBytesStreamOutput(recycler)) { - out.writeNamedWriteableList(expected); + out.writeNamedWriteableCollection(expected); try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), namedWriteableRegistry)) { - assertEquals(expected, in.readNamedWriteableList(BaseNamedWriteable.class)); + assertEquals(expected, in.readNamedWriteableCollectionAsList(BaseNamedWriteable.class)); assertEquals(0, in.available()); } } @@ -516,11 +516,11 @@ public void testWriteWriteableList() throws IOException { } final RecyclerBytesStreamOutput out = new RecyclerBytesStreamOutput(recycler); - out.writeList(expected); + out.writeCollection(expected); final StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes())); - final List loaded = in.readList(TestWriteable::new); + final List loaded = in.readCollectionAsList(TestWriteable::new); assertThat(loaded, hasSize(expected.size())); @@ -582,48 +582,6 @@ public void testWriteImmutableMapOfWritable() throws IOException { assertThat(expected, equalTo(loaded)); } - public void testWriteMapOfLists() throws IOException { - final int size = randomIntBetween(0, 5); - final Map> expected = Maps.newMapWithExpectedSize(size); - - for (int i = 0; i < size; ++i) { - int listSize = randomIntBetween(0, 5); - List list = new ArrayList<>(listSize); - - for (int j = 0; j < 
listSize; ++j) { - list.add(randomAlphaOfLength(5)); - } - - expected.put(randomAlphaOfLength(2), list); - } - - final RecyclerBytesStreamOutput out = new RecyclerBytesStreamOutput(recycler); - out.writeMapOfLists(expected, StreamOutput::writeString, StreamOutput::writeString); - - final StreamInput in = StreamInput.wrap(BytesReference.toBytes(out.bytes())); - - final Map> loaded = in.readMapOfLists(StreamInput::readString); - - assertThat(loaded.size(), equalTo(expected.size())); - - for (Map.Entry> entry : expected.entrySet()) { - assertThat(loaded.containsKey(entry.getKey()), equalTo(true)); - - List loadedList = loaded.get(entry.getKey()); - - assertThat(loadedList, hasSize(entry.getValue().size())); - - for (int i = 0; i < loadedList.size(); ++i) { - assertEquals(entry.getValue().get(i), loadedList.get(i)); - } - } - - assertEquals(0, in.available()); - - in.close(); - out.close(); - } - public void testWriteMapAsList() throws IOException { final int size = randomIntBetween(0, 100); final Map expected = Maps.newMapWithExpectedSize(size); diff --git a/server/src/test/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutputTests.java b/server/src/test/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutputTests.java index e26442284df00..bee63e72a3a0a 100644 --- a/server/src/test/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutputTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/stream/VersionCheckingStreamOutputTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.common.io.stream; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; @@ -36,7 +37,7 @@ public TransportVersion getMinimalSupportedVersion() { public void testCheckVersionCompatibility() throws IOException { TransportVersion streamVersion = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.MINIMUM_COMPATIBLE, + TransportVersions.MINIMUM_COMPATIBLE, TransportVersionUtils.getPreviousVersion(TransportVersion.current()) ); try (VersionCheckingStreamOutput out = new VersionCheckingStreamOutput(streamVersion)) { diff --git a/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java b/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java index fd7d7f04cbcd6..02084c818346b 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/LuceneTests.java @@ -439,8 +439,9 @@ public void testAsSequentialBitsUsesRandomAccess() throws IOException { w.addDocument(doc); } w.forceMerge(1); - try (IndexReader reader = DirectoryReader.open(w)) { - IndexSearcher searcher = newSearcher(reader); + try (IndexReader indexReader = DirectoryReader.open(w)) { + IndexSearcher searcher = newSearcher(indexReader); + IndexReader reader = searcher.getIndexReader(); searcher.setQueryCache(null); Query query = new IndexOrDocValuesQuery(new UnsupportedQuery(), NumericDocValuesField.newSlowRangeQuery("foo", 3L, 5L)); Weight weight = searcher.createWeight(query, ScoreMode.COMPLETE_NO_SCORES, 1f); diff --git a/server/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java b/server/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java index bc8600a8cc05c..219242019cb45 100644 --- 
a/server/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java
+++ b/server/src/test/java/org/elasticsearch/common/lucene/search/MultiPhrasePrefixQueryTests.java
@@ -25,31 +25,33 @@ public class MultiPhrasePrefixQueryTests extends ESTestCase {

     public void testSimple() throws Exception {
-        IndexWriter writer = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(Lucene.STANDARD_ANALYZER));
-        Document doc = new Document();
-        doc.add(new Field("field", "aaa bbb ccc ddd", TextField.TYPE_NOT_STORED));
-        writer.addDocument(doc);
-        IndexReader reader = DirectoryReader.open(writer);
-        IndexSearcher searcher = newSearcher(reader);
-
-        MultiPhrasePrefixQuery query = new MultiPhrasePrefixQuery("field");
-        query.add(new Term("field", "aa"));
-        assertThat(searcher.count(query), equalTo(1));
-
-        query = new MultiPhrasePrefixQuery("field");
-        query.add(new Term("field", "aaa"));
-        query.add(new Term("field", "bb"));
-        assertThat(searcher.count(query), equalTo(1));
-
-        query = new MultiPhrasePrefixQuery("field");
-        query.setSlop(1);
-        query.add(new Term("field", "aaa"));
-        query.add(new Term("field", "cc"));
-        assertThat(searcher.count(query), equalTo(1));
-
-        query = new MultiPhrasePrefixQuery("field");
-        query.setSlop(1);
-        query.add(new Term("field", "xxx"));
-        assertThat(searcher.count(query), equalTo(0));
+        try (IndexWriter writer = new IndexWriter(new ByteBuffersDirectory(), new IndexWriterConfig(Lucene.STANDARD_ANALYZER))) {
+            Document doc = new Document();
+            doc.add(new Field("field", "aaa bbb ccc ddd", TextField.TYPE_NOT_STORED));
+            writer.addDocument(doc);
+            try (IndexReader reader = DirectoryReader.open(writer)) {
+                IndexSearcher searcher = newSearcher(reader);
+
+                MultiPhrasePrefixQuery query = new MultiPhrasePrefixQuery("field");
+                query.add(new Term("field", "aa"));
+                assertThat(searcher.count(query), equalTo(1));
+
+                query = new MultiPhrasePrefixQuery("field");
+                query.add(new Term("field", "aaa"));
+                query.add(new Term("field", "bb"));
+                assertThat(searcher.count(query), equalTo(1));
+
+                query = new MultiPhrasePrefixQuery("field");
+                query.setSlop(1);
+                query.add(new Term("field", "aaa"));
+                query.add(new Term("field", "cc"));
+                assertThat(searcher.count(query), equalTo(1));
+
+                query = new MultiPhrasePrefixQuery("field");
+                query.setSlop(1);
+                query.add(new Term("field", "xxx"));
+                assertThat(searcher.count(query), equalTo(0));
+            }
+        }
     }
 }
diff --git a/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java b/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java
index be0ff95e8603e..976add854c584 100644
--- a/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java
+++ b/server/src/test/java/org/elasticsearch/common/lucene/uid/VersionsTests.java
@@ -18,7 +18,6 @@
 import org.apache.lucene.index.Term;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.util.BytesRef;
-import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.routing.IndexRouting;
@@ -31,7 +30,7 @@
 import org.elasticsearch.index.mapper.VersionFieldMapper;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.VersionUtils;
+import org.elasticsearch.test.index.IndexVersionUtils;

 import java.io.IOException;
 import java.util.ArrayList;
@@ -201,23 +200,24 @@ public void testCacheFilterReader() throws Exception {

     public void
testLuceneVersionOnUnknownVersions() { // between two known versions, should use the lucene version of the previous version - Version version = VersionUtils.getPreviousVersion(Version.CURRENT); - final Version nextVersion = Version.fromId(version.id + 100); - if (Version.getDeclaredVersions(Version.class).contains(nextVersion) == false) { + IndexVersion version = IndexVersionUtils.getPreviousVersion(); + final IndexVersion nextVersion = IndexVersion.fromId(version.id() + 100); + if (IndexVersionUtils.allReleasedVersions().contains(nextVersion) == false) { // the version is not known, we make an assumption the Lucene version stays the same - assertEquals(nextVersion.luceneVersion(), version.luceneVersion()); + assertThat(version.luceneVersion(), equalTo(nextVersion.luceneVersion())); } else { // the version is known, the most we can assert is that the Lucene version is not earlier + // Version does not implement Comparable :( assertTrue(nextVersion.luceneVersion().onOrAfter(version.luceneVersion())); } // too old version, major should be the oldest supported lucene version minus 1 - version = Version.fromString("5.2.1"); - assertEquals(VersionUtils.getFirstVersion().luceneVersion().major - 1, version.luceneVersion().major); + version = IndexVersion.fromId(5020199); + assertThat(version.luceneVersion().major, equalTo(IndexVersionUtils.getFirstVersion().luceneVersion().major - 1)); // future version, should be the same version as today - version = Version.fromId(Version.CURRENT.id + 100); - assertEquals(Version.CURRENT.luceneVersion(), version.luceneVersion()); + version = IndexVersion.fromId(IndexVersion.current().id() + 100); + assertThat(version.luceneVersion(), equalTo(IndexVersion.current().luceneVersion())); } public void testTimeSeriesLoadDocIdAndVersion() throws Exception { diff --git a/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java b/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java index bb0b0e8bfef43..d2a7036b7db6f 100644 --- a/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java +++ b/server/src/test/java/org/elasticsearch/common/network/NetworkModuleTests.java @@ -38,6 +38,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BiConsumer; @@ -237,7 +238,7 @@ public void testRegisterInterceptor() { @Override public TransportRequestHandler interceptHandler( String action, - String executor, + Executor executor, boolean forceExecution, TransportRequestHandler actualHandler ) { diff --git a/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java b/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java index 6acc7aa32bf48..965f305c3c23f 100644 --- a/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java +++ b/server/src/test/java/org/elasticsearch/common/settings/SettingsTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.Diff; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; @@ -622,7 +623,7 @@ public void testMissingValue() throws Exception { public void testReadWriteArray() throws IOException { BytesStreamOutput output = new BytesStreamOutput(); - 
output.setTransportVersion(randomFrom(TransportVersion.current(), TransportVersion.V_7_0_0)); + output.setTransportVersion(randomFrom(TransportVersion.current(), TransportVersions.V_7_0_0)); Settings settings = Settings.builder().putList("foo.bar", "0", "1", "2", "3").put("foo.bar.baz", "baz").build(); settings.writeTo(output); StreamInput in = StreamInput.wrap(BytesReference.toBytes(output.bytes())); diff --git a/server/src/test/java/org/elasticsearch/common/util/BytesRefArrayTests.java b/server/src/test/java/org/elasticsearch/common/util/BytesRefArrayTests.java index af60f166caf9c..0ca6bf86ceec7 100644 --- a/server/src/test/java/org/elasticsearch/common/util/BytesRefArrayTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/BytesRefArrayTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.test.ESTestCase; @@ -46,7 +47,7 @@ public void testRandomWithSerialization() throws IOException { BytesRefArray copy = copyInstance( array, writableRegistry(), - (out, value) -> value.writeTo(out), + StreamOutput::writeWriteable, in -> new BytesRefArray(in, mockBigArrays()), TransportVersion.current() ); @@ -95,7 +96,7 @@ public void testLookup() throws IOException { array = copyInstance( inArray, writableRegistry(), - (out, value) -> value.writeTo(out), + StreamOutput::writeWriteable, in -> new BytesRefArray(in, mockBigArrays()), TransportVersion.current() ); diff --git a/server/src/test/java/org/elasticsearch/common/util/BytesRefHashTests.java b/server/src/test/java/org/elasticsearch/common/util/BytesRefHashTests.java index 0b1fb35235f48..ac8c817699e33 100644 --- a/server/src/test/java/org/elasticsearch/common/util/BytesRefHashTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/BytesRefHashTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.Releasables; @@ -392,7 +393,7 @@ public void testGetByteRefsAndSerialization() throws IOException { BytesRefArray refArrayCopy = copyInstance( refArray, writableRegistry(), - (out, value) -> value.writeTo(out), + StreamOutput::writeWriteable, in -> new BytesRefArray(in, mockBigArrays()), TransportVersion.current() ); diff --git a/server/src/test/java/org/elasticsearch/common/util/MapsTests.java b/server/src/test/java/org/elasticsearch/common/util/MapsTests.java index 532df29c00b70..525c04cae9957 100644 --- a/server/src/test/java/org/elasticsearch/common/util/MapsTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/MapsTests.java @@ -99,7 +99,31 @@ public void testOfEntries() { assertMapEntriesAndImmutability(map, entries); } - public void testDeepEquals() { + public void testDeepEqualsMapsWithSimpleValues() { + final Supplier keyGenerator = () -> randomAlphaOfLengthBetween(1, 5); + final Supplier valueGenerator = () -> randomInt(5); + final Map map = randomMap(randomInt(5), keyGenerator, valueGenerator); + final Map mapCopy = new HashMap<>(map); + + assertTrue(Maps.deepEquals(map, mapCopy)); + 
+ final Map mapModified = mapCopy; + if (mapModified.isEmpty()) { + mapModified.put(keyGenerator.get(), valueGenerator.get()); + } else { + if (randomBoolean()) { + final String randomKey = mapModified.keySet().toArray(new String[0])[randomInt(mapModified.size() - 1)]; + final int value = mapModified.get(randomKey); + mapModified.put(randomKey, randomValueOtherThanMany((v) -> v.equals(value), valueGenerator)); + } else { + mapModified.put(randomValueOtherThanMany(mapModified::containsKey, keyGenerator), valueGenerator.get()); + } + } + + assertFalse(Maps.deepEquals(map, mapModified)); + } + + public void testDeepEqualsMapsWithArrayValues() { final Supplier keyGenerator = () -> randomAlphaOfLengthBetween(1, 5); final Supplier arrayValueGenerator = () -> random().ints(randomInt(5)).toArray(); final Map map = randomMap(randomInt(5), keyGenerator, arrayValueGenerator); @@ -125,6 +149,42 @@ public void testDeepEquals() { assertFalse(Maps.deepEquals(map, mapModified)); } + public void testDeepEqualsMapsWithMapValuesSimple() { + Map> m1 = Map.of("a", Map.of("b", new int[] { 1 })); + Map> m2 = Map.of("a", Map.of("b", new int[] { 1 })); + assertTrue(Maps.deepEquals(m1, m2)); + } + + public void testDeepEqualsMapsWithMapValues() { + final Supplier keyGenerator = () -> randomAlphaOfLengthBetween(1, 5); + final Supplier> mapValueGenerator = () -> Map.of("nested", random().ints(randomInt(5)).toArray()); + final Map> map = randomMap(randomInt(5), keyGenerator, mapValueGenerator); + final Map> mapCopy = map.entrySet().stream().collect(toMap(Map.Entry::getKey, e -> { + int[] value = e.getValue().get("nested"); + return Map.of("nested", Arrays.copyOf(value, value.length)); + })); + + assertTrue(Maps.deepEquals(map, mapCopy)); + + final Map> mapModified = mapCopy; + if (mapModified.isEmpty()) { + mapModified.put(keyGenerator.get(), mapValueGenerator.get()); + } else { + if (randomBoolean()) { + final String randomKey = mapModified.keySet().toArray(new String[0])[randomInt(mapModified.size() - 1)]; + final Map value = mapModified.get(randomKey); + mapModified.put( + randomKey, + randomValueOtherThanMany((v) -> Arrays.equals(v.get("nested"), value.get("nested")), mapValueGenerator) + ); + } else { + mapModified.put(randomValueOtherThanMany(mapModified::containsKey, keyGenerator), mapValueGenerator.get()); + } + } + + assertFalse(Maps.deepEquals(map, mapModified)); + } + public void testCollectToUnmodifiableSortedMap() { SortedMap canadianProvinces = Stream.of( new Tuple<>("ON", "Ontario"), diff --git a/server/src/test/java/org/elasticsearch/common/util/concurrent/AbstractAsyncTaskTests.java b/server/src/test/java/org/elasticsearch/common/util/concurrent/AbstractAsyncTaskTests.java index 0a7bf591bb2c7..cdb85aaae2fc6 100644 --- a/server/src/test/java/org/elasticsearch/common/util/concurrent/AbstractAsyncTaskTests.java +++ b/server/src/test/java/org/elasticsearch/common/util/concurrent/AbstractAsyncTaskTests.java @@ -45,7 +45,7 @@ public void testAutoRepeat() throws Exception { final CyclicBarrier barrier1 = new CyclicBarrier(2); // 1 for runInternal plus 1 for the test sequence final CyclicBarrier barrier2 = new CyclicBarrier(2); // 1 for runInternal plus 1 for the test sequence final AtomicInteger count = new AtomicInteger(); - AbstractAsyncTask task = new AbstractAsyncTask(logger, threadPool, TimeValue.timeValueMillis(1), true) { + AbstractAsyncTask task = new AbstractAsyncTask(logger, threadPool, threadPool.generic(), TimeValue.timeValueMillis(1), true) { @Override protected boolean mustReschedule() { @@ 
-71,10 +71,6 @@ protected void runInternal() { } } - @Override - protected String getThreadPool() { - return ThreadPool.Names.GENERIC; - } }; assertFalse(task.isScheduled()); @@ -101,7 +97,7 @@ public void testManualRepeat() throws Exception { boolean shouldRunThrowException = randomBoolean(); final CyclicBarrier barrier = new CyclicBarrier(2); // 1 for runInternal plus 1 for the test sequence final AtomicInteger count = new AtomicInteger(); - AbstractAsyncTask task = new AbstractAsyncTask(logger, threadPool, TimeValue.timeValueMillis(1), false) { + AbstractAsyncTask task = new AbstractAsyncTask(logger, threadPool, threadPool.generic(), TimeValue.timeValueMillis(1), false) { @Override protected boolean mustReschedule() { @@ -122,10 +118,6 @@ protected void runInternal() { } } - @Override - protected String getThreadPool() { - return ThreadPool.Names.GENERIC; - } }; assertFalse(task.isScheduled()); @@ -148,7 +140,13 @@ protected String getThreadPool() { public void testCloseWithNoRun() { - AbstractAsyncTask task = new AbstractAsyncTask(logger, threadPool, TimeValue.timeValueMinutes(10), true) { + AbstractAsyncTask task = new AbstractAsyncTask( + logger, + threadPool, + EsExecutors.DIRECT_EXECUTOR_SERVICE, + TimeValue.timeValueMinutes(10), + true + ) { @Override protected boolean mustReschedule() { @@ -171,7 +169,13 @@ public void testChangeInterval() throws Exception { final CountDownLatch latch = new CountDownLatch(2); - AbstractAsyncTask task = new AbstractAsyncTask(logger, threadPool, TimeValue.timeValueHours(1), true) { + AbstractAsyncTask task = new AbstractAsyncTask( + logger, + threadPool, + EsExecutors.DIRECT_EXECUTOR_SERVICE, + TimeValue.timeValueHours(1), + true + ) { @Override protected boolean mustReschedule() { @@ -202,7 +206,14 @@ public void testIsScheduledRemainFalseAfterClose() throws Exception { List<AbstractAsyncTask> tasks = new ArrayList<>(numTasks); AtomicLong counter = new AtomicLong(); for (int i = 0; i < numTasks; i++) { - AbstractAsyncTask task = new AbstractAsyncTask(logger, threadPool, TimeValue.timeValueMillis(randomIntBetween(1, 2)), true) { + AbstractAsyncTask task = new AbstractAsyncTask( + logger, + threadPool, + EsExecutors.DIRECT_EXECUTOR_SERVICE, + TimeValue.timeValueMillis(randomIntBetween(1, 2)), + true + ) { + @Override protected boolean mustReschedule() { return counter.get() <= 1000; diff --git a/server/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java b/server/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java index 0fe25a6b74412..24e635b2f8b76 100644 --- a/server/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java +++ b/server/src/test/java/org/elasticsearch/deps/lucene/SimpleLuceneTests.java @@ -44,47 +44,55 @@ public class SimpleLuceneTests extends ESTestCase { public void testSortValues() throws Exception { - Directory dir = new ByteBuffersDirectory(); - IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); - for (int i = 0; i < 10; i++) { - Document document = new Document(); - String text = new String(new char[] { (char) (97 + i), (char) (97 + i) }); - document.add(new TextField("str", text, Field.Store.YES)); - document.add(new SortedDocValuesField("str", new BytesRef(text))); - indexWriter.addDocument(document); - } - IndexReader reader = DirectoryReader.open(indexWriter); - IndexSearcher searcher = newSearcher(reader); - TopFieldDocs docs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("str", SortField.Type.STRING))); - for (int i = 0; i < 10; i++)
{ - FieldDoc fieldDoc = (FieldDoc) docs.scoreDocs[i]; - assertThat((BytesRef) fieldDoc.fields[0], equalTo(new BytesRef(new String(new char[] { (char) (97 + i), (char) (97 + i) })))); + try ( + Directory dir = new ByteBuffersDirectory(); + IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)) + ) { + for (int i = 0; i < 10; i++) { + Document document = new Document(); + String text = new String(new char[] { (char) (97 + i), (char) (97 + i) }); + document.add(new TextField("str", text, Field.Store.YES)); + document.add(new SortedDocValuesField("str", new BytesRef(text))); + indexWriter.addDocument(document); + } + try (IndexReader reader = DirectoryReader.open(indexWriter)) { + IndexSearcher searcher = newSearcher(reader); + TopFieldDocs docs = searcher.search(new MatchAllDocsQuery(), 10, new Sort(new SortField("str", SortField.Type.STRING))); + for (int i = 0; i < 10; i++) { + FieldDoc fieldDoc = (FieldDoc) docs.scoreDocs[i]; + assertThat( + (BytesRef) fieldDoc.fields[0], + equalTo(new BytesRef(new String(new char[] { (char) (97 + i), (char) (97 + i) }))) + ); + } + } } } public void testSimpleNumericOps() throws Exception { - Directory dir = new ByteBuffersDirectory(); - IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); - - Document document = new Document(); - document.add(new TextField("_id", "1", Field.Store.YES)); - document.add(new IntPoint("test", 2)); - document.add(new StoredField("test", 2)); - indexWriter.addDocument(document); - - IndexReader reader = DirectoryReader.open(indexWriter); - IndexSearcher searcher = newSearcher(reader); - TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); - Document doc = searcher.doc(topDocs.scoreDocs[0].doc); - IndexableField f = doc.getField("test"); - assertThat(f.numericValue(), equalTo(2)); - - topDocs = searcher.search(IntPoint.newExactQuery("test", 2), 1); - doc = searcher.doc(topDocs.scoreDocs[0].doc); - f = doc.getField("test"); - assertThat(f.stringValue(), equalTo("2")); + try ( + Directory dir = new ByteBuffersDirectory(); + IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)) + ) { + Document document = new Document(); + document.add(new TextField("_id", "1", Field.Store.YES)); + document.add(new IntPoint("test", 2)); + document.add(new StoredField("test", 2)); + indexWriter.addDocument(document); - indexWriter.close(); + try (IndexReader reader = DirectoryReader.open(indexWriter)) { + IndexSearcher searcher = newSearcher(reader); + TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); + Document doc = searcher.doc(topDocs.scoreDocs[0].doc); + IndexableField f = doc.getField("test"); + assertThat(f.numericValue(), equalTo(2)); + + topDocs = searcher.search(IntPoint.newExactQuery("test", 2), 1); + doc = searcher.doc(topDocs.scoreDocs[0].doc); + f = doc.getField("test"); + assertThat(f.stringValue(), equalTo("2")); + } + } } /** @@ -93,54 +101,55 @@ public void testSimpleNumericOps() throws Exception { * first (with load and break). 
*/ public void testOrdering() throws Exception { - Directory dir = new ByteBuffersDirectory(); - IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); - - Document document = new Document(); - document.add(new TextField("_id", "1", Field.Store.YES)); - document.add(new TextField("#id", "1", Field.Store.YES)); - indexWriter.addDocument(document); + try ( + Directory dir = new ByteBuffersDirectory(); + IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)) + ) { + Document document = new Document(); + document.add(new TextField("_id", "1", Field.Store.YES)); + document.add(new TextField("#id", "1", Field.Store.YES)); + indexWriter.addDocument(document); - IndexReader reader = DirectoryReader.open(indexWriter); - IndexSearcher searcher = newSearcher(reader); - TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); - final ArrayList<String> fieldsOrder = new ArrayList<>(); - searcher.doc(topDocs.scoreDocs[0].doc, new StoredFieldVisitor() { - @Override - public Status needsField(FieldInfo fieldInfo) throws IOException { - fieldsOrder.add(fieldInfo.name); - return Status.YES; + try (IndexReader reader = DirectoryReader.open(indexWriter)) { + IndexSearcher searcher = newSearcher(reader); + TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); + final ArrayList<String> fieldsOrder = new ArrayList<>(); + searcher.doc(topDocs.scoreDocs[0].doc, new StoredFieldVisitor() { + @Override + public Status needsField(FieldInfo fieldInfo) throws IOException { + fieldsOrder.add(fieldInfo.name); + return Status.YES; + } + }); + + assertThat(fieldsOrder.size(), equalTo(2)); + assertThat(fieldsOrder.get(0), equalTo("_id")); + assertThat(fieldsOrder.get(1), equalTo("#id")); } - }); - - assertThat(fieldsOrder.size(), equalTo(2)); - assertThat(fieldsOrder.get(0), equalTo("_id")); - assertThat(fieldsOrder.get(1), equalTo("#id")); - - indexWriter.close(); + } } public void testNRTSearchOnClosedWriter() throws Exception { - Directory dir = new ByteBuffersDirectory(); - IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); - DirectoryReader reader = DirectoryReader.open(indexWriter); - - for (int i = 0; i < 100; i++) { - Document document = new Document(); - TextField field = new TextField("_id", Integer.toString(i), Field.Store.YES); - document.add(field); - indexWriter.addDocument(document); - } - reader = refreshReader(reader); - - indexWriter.close(); - - for (LeafReaderContext leaf : reader.leaves()) { - leaf.reader().terms("_id").iterator().next(); + try ( + Directory dir = new ByteBuffersDirectory(); + IndexWriter indexWriter = new IndexWriter(dir, new IndexWriterConfig(Lucene.STANDARD_ANALYZER)); + DirectoryReader reader = DirectoryReader.open(indexWriter) + ) { + for (int i = 0; i < 100; i++) { + Document document = new Document(); + TextField field = new TextField("_id", Integer.toString(i), Field.Store.YES); + document.add(field); + indexWriter.addDocument(document); + } + try (DirectoryReader refreshedReader = refreshReader(reader)) { + for (LeafReaderContext leaf : refreshedReader.leaves()) { + leaf.reader().terms("_id").iterator().next(); + } + } } } - private DirectoryReader refreshReader(DirectoryReader reader) throws IOException { + private static DirectoryReader refreshReader(DirectoryReader reader) throws IOException { DirectoryReader oldReader = reader; reader = DirectoryReader.openIfChanged(reader); if (reader != oldReader) { diff --git
a/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java b/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java index c9350a3ec4dea..7122c1465a27d 100644 --- a/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java +++ b/server/src/test/java/org/elasticsearch/deps/lucene/VectorHighlighterTests.java @@ -27,6 +27,7 @@ import org.apache.lucene.store.ByteBuffersDirectory; import org.apache.lucene.store.Directory; import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.core.IOUtils; import org.elasticsearch.lucene.search.vectorhighlight.CustomFieldQuery; import org.elasticsearch.test.ESTestCase; @@ -57,13 +58,14 @@ public void testVectorHighlighter() throws Exception { FastVectorHighlighter highlighter = new FastVectorHighlighter(); String fragment = highlighter.getBestFragment( highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))), - reader, + searcher.getIndexReader(), topDocs.scoreDocs[0].doc, "content", 30 ); assertThat(fragment, notNullValue()); assertThat(fragment, equalTo("the big bad dog")); + IOUtils.close(reader, indexWriter, dir); } public void testVectorHighlighterPrefixQuery() throws Exception { @@ -79,8 +81,9 @@ public void testVectorHighlighterPrefixQuery() throws Exception { document.add(new Field("content", "the big bad dog", vectorsType)); indexWriter.addDocument(document); - IndexReader reader = DirectoryReader.open(indexWriter); - IndexSearcher searcher = newSearcher(reader); + IndexReader indexReader = DirectoryReader.open(indexWriter); + IndexSearcher searcher = newSearcher(indexReader); + IndexReader reader = searcher.getIndexReader(); TopDocs topDocs = searcher.search(new TermQuery(new Term("_id", "1")), 1); assertThat(topDocs.totalHits.value, equalTo(1L)); @@ -119,6 +122,7 @@ public void testVectorHighlighterPrefixQuery() throws Exception { 30 ); assertThat(fragment, notNullValue()); + IOUtils.close(indexReader, indexWriter, dir); } public void testVectorHighlighterNoStore() throws Exception { @@ -143,12 +147,13 @@ public void testVectorHighlighterNoStore() throws Exception { FastVectorHighlighter highlighter = new FastVectorHighlighter(); String fragment = highlighter.getBestFragment( highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))), - reader, + searcher.getIndexReader(), topDocs.scoreDocs[0].doc, "content", 30 ); assertThat(fragment, nullValue()); + IOUtils.close(reader, indexWriter, dir); } public void testVectorHighlighterNoTermVector() throws Exception { @@ -169,11 +174,12 @@ public void testVectorHighlighterNoTermVector() throws Exception { FastVectorHighlighter highlighter = new FastVectorHighlighter(); String fragment = highlighter.getBestFragment( highlighter.getFieldQuery(new TermQuery(new Term("content", "bad"))), - reader, + searcher.getIndexReader(), topDocs.scoreDocs[0].doc, "content", 30 ); assertThat(fragment, nullValue()); + IOUtils.close(reader, indexWriter, dir); } } diff --git a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java index 769cd2ce8105f..3b1b316e44bae 100644 --- a/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java +++ b/server/src/test/java/org/elasticsearch/discovery/DiscoveryModuleTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.cluster.service.ClusterApplier; import org.elasticsearch.cluster.service.MasterService; +import 
org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; import org.elasticsearch.common.settings.ClusterSettings; @@ -113,7 +114,8 @@ private DiscoveryModule newModule( gatewayMetaState, mock(RerouteService.class), null, - new NoneCircuitBreakerService() + new NoneCircuitBreakerService(), + CompatibilityVersionsUtils.staticCurrent() ); } diff --git a/server/src/test/java/org/elasticsearch/gateway/ClusterStateUpdatersTests.java b/server/src/test/java/org/elasticsearch/gateway/ClusterStateUpdatersTests.java index 4ea843bb7ece0..5112556fe79af 100644 --- a/server/src/test/java/org/elasticsearch/gateway/ClusterStateUpdatersTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/ClusterStateUpdatersTests.java @@ -7,7 +7,6 @@ */ package org.elasticsearch.gateway; -import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.TestShardRoutingRoleStrategies; import org.elasticsearch.cluster.block.ClusterBlocks; @@ -18,6 +17,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; +import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; @@ -217,7 +217,7 @@ public void testSetLocalNode() { final ClusterState initialState = ClusterState.builder(ClusterState.EMPTY_STATE).metadata(metadata).build(); final DiscoveryNode localNode = DiscoveryNodeUtils.builder("node1").roles(Sets.newHashSet(DiscoveryNodeRole.MASTER_ROLE)).build(); - final ClusterState updatedState = setLocalNode(initialState, localNode, TransportVersion.current()); + final ClusterState updatedState = setLocalNode(initialState, localNode, CompatibilityVersionsUtils.staticCurrent()); assertMetadataEquals(initialState, updatedState); assertThat(updatedState.nodes().getLocalNode(), equalTo(localNode)); @@ -261,7 +261,7 @@ public void testHideStateIfNotRecovered() { .build(); final DiscoveryNode localNode = DiscoveryNodeUtils.builder("node1").roles(Sets.newHashSet(DiscoveryNodeRole.MASTER_ROLE)).build(); final ClusterState updatedState = Function.identity() - .andThen(state -> setLocalNode(state, localNode, TransportVersion.current())) + .andThen(state -> setLocalNode(state, localNode, CompatibilityVersionsUtils.staticCurrent())) .andThen(ClusterStateUpdaters::recoverClusterBlocks) .apply(initialState); diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java index d0c0ccc8a8804..547e4e4eccda4 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStatePersistedStateTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; @@ -401,7 +402,8 @@ public void testDataOnlyNodePersistence() 
throws Exception { null, null, persistedClusterStateService, - List.of() + List.of(), + CompatibilityVersionsUtils.staticCurrent() ); final CoordinationState.PersistedState persistedState = gateway.getPersistedState(); assertThat(persistedState, instanceOf(GatewayMetaState.AsyncPersistedState.class)); diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java index b5aacd7f93fd5..22869ad37524c 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadataVerifier; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.plugins.ClusterCoordinationPlugin; @@ -163,7 +164,17 @@ public Optional<PersistedStateFactory> getPersistedStateFactory() { assertThat( expectThrows( IllegalStateException.class, - () -> gatewayMetaState.start(null, null, null, null, null, null, null, List.of(duplicatePlugin, duplicatePlugin)) + () -> gatewayMetaState.start( + null, + null, + null, + null, + null, + null, + null, + List.of(duplicatePlugin, duplicatePlugin), + CompatibilityVersionsUtils.staticCurrent() + ) ).getMessage(), containsString("multiple persisted-state factories") ); @@ -173,7 +184,7 @@ public Optional<PersistedStateFactory> getPersistedStateFactory() { public Optional<PersistedStateFactory> getPersistedStateFactory() { return Optional.of((settings, transportService, persistedClusterStateService) -> testPersistedState); } - })); + }), CompatibilityVersionsUtils.staticCurrent()); assertSame(testPersistedState, gatewayMetaState.getPersistedState()); } } diff --git a/server/src/test/java/org/elasticsearch/health/HealthServiceTests.java b/server/src/test/java/org/elasticsearch/health/HealthServiceTests.java index 7ae072e441f81..7d7eb5c1a5697 100644 --- a/server/src/test/java/org/elasticsearch/health/HealthServiceTests.java +++ b/server/src/test/java/org/elasticsearch/health/HealthServiceTests.java @@ -21,7 +21,6 @@ import org.junit.After; import org.junit.Before; -import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -62,7 +61,6 @@ public void testShouldReturnGroupedIndicators() throws Exception { var shardsAvailable = new HealthIndicatorResult("shards_availability", GREEN, null, null, null, null); var service = new HealthService( - Collections.emptyList(), List.of( createMockHealthIndicatorService(networkLatency), createMockHealthIndicatorService(slowTasks), createMockHealthIndicatorService(shardsAvailable) @@ -119,7 +117,6 @@ public void testMissingIndicator() throws Exception { var shardsAvailable = new HealthIndicatorResult("shards_availability", GREEN, null, null, null, null); var service = new HealthService( - Collections.emptyList(), List.of( createMockHealthIndicatorService(networkLatency), createMockHealthIndicatorService(slowTasks), @@ -142,7 +139,7 @@ public void testMissingIndicator() throws Exception { public void testValidateSize() { var shardsAvailable = new HealthIndicatorResult("shards_availability", GREEN, null, null, null, null); - var service = new HealthService(Collections.emptyList(), List.of(createMockHealthIndicatorService(shardsAvailable)), threadPool); + var service = new
HealthService(List.of(createMockHealthIndicatorService(shardsAvailable)), threadPool); NodeClient client = getTestClient(HealthInfo.EMPTY_HEALTH_INFO); IllegalArgumentException illegalArgumentException = expectThrows( IllegalArgumentException.class, @@ -219,8 +216,8 @@ public void testPreflightIndicatorResultsPresent() throws Exception { var shardsAvailable = new HealthIndicatorResult("shards_availability", GREEN, null, null, null, null); var service = new HealthService( - List.of(createMockHealthIndicatorService(hasMaster)), List.of( + createMockHealthIndicatorService(true, hasMaster, null), createMockHealthIndicatorService(networkLatency), createMockHealthIndicatorService(slowTasks), createMockHealthIndicatorService(shardsAvailable) @@ -259,8 +256,8 @@ public void testThatIndicatorsGetHealthInfoData() throws Exception { var service = new HealthService( // The preflight indicator does not get data because the data is not fetched until after the preflight check - List.of(createMockHealthIndicatorService(hasMaster, HealthInfo.EMPTY_HEALTH_INFO)), List.of( + createMockHealthIndicatorService(true, hasMaster, HealthInfo.EMPTY_HEALTH_INFO), createMockHealthIndicatorService(networkLatency, healthInfo), createMockHealthIndicatorService(slowTasks, healthInfo), createMockHealthIndicatorService(shardsAvailable, healthInfo) @@ -288,8 +285,9 @@ public void testPreflightIndicatorFailureTriggersUnknownResults() throws Excepti var shardsAvailable = new HealthIndicatorResult("shards_availability", GREEN, null, null, null, null); var service = new HealthService( - List.of(createMockHealthIndicatorService(hasMaster), createMockHealthIndicatorService(hasStorage)), List.of( + createMockHealthIndicatorService(true, hasMaster, null), + createMockHealthIndicatorService(true, hasStorage, null), createMockHealthIndicatorService(networkLatency), createMockHealthIndicatorService(slowTasks), createMockHealthIndicatorService(shardsAvailable) @@ -360,18 +358,27 @@ private NodeClient getTestClient(HealthInfo healthInfo) { } private static HealthIndicatorService createMockHealthIndicatorService(HealthIndicatorResult result) { - return createMockHealthIndicatorService(result, null); + return createMockHealthIndicatorService(false, result, null); + } + + private static HealthIndicatorService createMockHealthIndicatorService(HealthIndicatorResult result, HealthInfo expectedHealthInfo) { + return createMockHealthIndicatorService(false, result, expectedHealthInfo); } /** * This returns a test HealthIndicatorService + * @param isPreflight true if it's a preflight indicator * @param result The HealthIndicatorResult that will be returned by the calculate method when the HealthIndicatorService returned by * this method is called * @param expectedHealthInfo If this HealthInfo is not null then the returned HealthIndicatorService's calculate method will assert * that the HealthInfo it is passed is equal to this when it is called * @return A test HealthIndicatorService */ - private static HealthIndicatorService createMockHealthIndicatorService(HealthIndicatorResult result, HealthInfo expectedHealthInfo) { + private static HealthIndicatorService createMockHealthIndicatorService( + boolean isPreflight, + HealthIndicatorResult result, + HealthInfo expectedHealthInfo + ) { return new HealthIndicatorService() { @Override public String name() { @@ -385,6 +392,11 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources } return result; } + + @Override + public boolean isPreflight() { + return isPreflight; + } }; } 
} diff --git a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java index dba69e7db7ba4..eb034778be63d 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexServiceTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.query.QueryBuilder; @@ -24,7 +25,6 @@ import org.elasticsearch.plugins.Plugin; import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.InternalSettingsPlugin; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; @@ -75,7 +75,11 @@ public void testBaseAsyncTask() throws Exception { AtomicReference<CountDownLatch> latch = new AtomicReference<>(new CountDownLatch(1)); AtomicReference<CountDownLatch> latch2 = new AtomicReference<>(new CountDownLatch(1)); final AtomicInteger count = new AtomicInteger(); - IndexService.BaseAsyncTask task = new IndexService.BaseAsyncTask(indexService, TimeValue.timeValueMillis(1)) { + IndexService.BaseAsyncTask task = new IndexService.BaseAsyncTask( + indexService, + indexService.getThreadPool().generic(), + TimeValue.timeValueMillis(1) + ) { @Override protected void runInternal() { final CountDownLatch l1 = latch.get(); @@ -96,11 +100,6 @@ protected void runInternal() { } } } - - @Override - protected String getThreadPool() { - return ThreadPool.Names.GENERIC; - } }; latch.get().await(); @@ -115,11 +114,9 @@ protected String getThreadPool() { latch2.get().countDown(); assertEquals(2, count.get()); - task = new IndexService.BaseAsyncTask(indexService, TimeValue.timeValueMillis(1000000)) { + task = new IndexService.BaseAsyncTask(indexService, EsExecutors.DIRECT_EXECUTOR_SERVICE, TimeValue.timeValueMillis(1000000)) { @Override - protected void runInternal() { - - } + protected void runInternal() {} }; assertTrue(task.mustReschedule()); @@ -140,11 +137,9 @@ protected void runInternal() { indexService = getInstanceFromNode(IndicesService.class).indexServiceSafe(index); assertNotSame(closedIndexService, indexService); - task = new IndexService.BaseAsyncTask(indexService, TimeValue.timeValueMillis(100000)) { + task = new IndexService.BaseAsyncTask(indexService, EsExecutors.DIRECT_EXECUTOR_SERVICE, TimeValue.timeValueMillis(100000)) { @Override - protected void runInternal() { - - } + protected void runInternal() {} }; assertTrue(task.mustReschedule()); assertFalse(task.isClosed()); diff --git a/server/src/test/java/org/elasticsearch/index/IndexVersionTests.java b/server/src/test/java/org/elasticsearch/index/IndexVersionTests.java index 89d6b880bfd42..4ffd98eb4d8a9 100644 --- a/server/src/test/java/org/elasticsearch/index/IndexVersionTests.java +++ b/server/src/test/java/org/elasticsearch/index/IndexVersionTests.java @@ -9,11 +9,14 @@ package org.elasticsearch.index; import org.apache.lucene.util.Version; +import org.elasticsearch.common.lucene.Lucene; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.index.IndexVersionUtils; +import org.hamcrest.Matchers; import java.lang.reflect.Modifier; import java.util.Collections; +import
java.util.Locale; import java.util.Map; import java.util.Set; import java.util.regex.Matcher; @@ -163,11 +166,13 @@ public void testMinimumCompatibleVersion() { public void testVersionConstantPresent() { Set<IndexVersion> ignore = Set.of(IndexVersion.ZERO, IndexVersion.current(), IndexVersion.MINIMUM_COMPATIBLE); assertThat(IndexVersion.current(), sameInstance(IndexVersion.fromId(IndexVersion.current().id()))); + assertThat(IndexVersion.current().luceneVersion(), equalTo(org.apache.lucene.util.Version.LATEST)); final int iters = scaledRandomIntBetween(20, 100); for (int i = 0; i < iters; i++) { IndexVersion version = IndexVersionUtils.randomVersion(ignore); assertThat(version, sameInstance(IndexVersion.fromId(version.id()))); + assertThat(version.luceneVersion(), sameInstance(IndexVersion.fromId(version.id()).luceneVersion())); } } @@ -182,4 +187,13 @@ public void testToString() { assertEquals("2000099", IndexVersion.fromId(2_00_00_99).toString()); assertEquals("5000099", IndexVersion.fromId(5_00_00_99).toString()); } + + public void testParseLenient() { + // note this is just a silly sanity check, we test it in lucene + for (IndexVersion version : IndexVersionUtils.allReleasedVersions()) { + org.apache.lucene.util.Version luceneVersion = version.luceneVersion(); + String string = luceneVersion.toString().toUpperCase(Locale.ROOT).replaceFirst("^LUCENE_(\\d+)_(\\d+)$", "$1.$2"); + assertThat(luceneVersion, Matchers.equalTo(Lucene.parseVersionLenient(string, null))); + } + } } diff --git a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java index 1b852bd7cd4d9..0018c9cf1d7da 100644 --- a/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java +++ b/server/src/test/java/org/elasticsearch/index/analysis/PreBuiltAnalyzerTests.java @@ -69,7 +69,9 @@ public void testThatInstancesAreCachedAndReused() { ); // Same Lucene version should be cached: - assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(IndexVersion.V_8_0_0), PreBuiltAnalyzers.STOP.getAnalyzer(IndexVersion.V_8_0_1)); + IndexVersion v1 = IndexVersionUtils.randomVersion(random()); + IndexVersion v2 = new IndexVersion(v1.id() - 1, v1.luceneVersion()); + assertSame(PreBuiltAnalyzers.STOP.getAnalyzer(v1), PreBuiltAnalyzers.STOP.getAnalyzer(v2)); } public void testThatAnalyzersAreUsedInMapping() throws IOException { diff --git a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java index 2297c914a64fd..e05be91c651c1 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/DynamicMappingTests.java @@ -9,16 +9,21 @@ import org.apache.lucene.index.IndexableField; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; import java.io.IOException; import java.time.Instant; +import static org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.MAX_DIMS_COUNT; +import static
org.elasticsearch.index.mapper.vectors.DenseVectorFieldMapper.MIN_DIMS_FOR_DYNAMIC_FLOAT_MAPPING; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -942,4 +947,48 @@ public void testSubobjectsFalseRootDynamicUpdate() throws Exception { assertEquals(0, mapperService.mappingLookup().objectMappers().size()); } + + public void testDefaultDenseVectorMappings() throws IOException { + DocumentMapper mapper = createDocumentMapper(topMapping(b -> b.field("numeric_detection", true))); + doTestDefaultDenseVectorMappings(mapper, XContentFactory.jsonBuilder()); + doTestDefaultDenseVectorMappings(mapper, XContentFactory.yamlBuilder()); + doTestDefaultDenseVectorMappings(mapper, XContentFactory.smileBuilder()); + doTestDefaultDenseVectorMappings(mapper, XContentFactory.cborBuilder()); + } + + private void doTestDefaultDenseVectorMappings(DocumentMapper mapper, XContentBuilder builder) throws IOException { + BytesReference source = BytesReference.bytes( + builder.startObject() + .field("mapsToFloatTooSmall", Randomness.get().doubles(MIN_DIMS_FOR_DYNAMIC_FLOAT_MAPPING - 1, 0.0, 5.0).toArray()) + .field("mapsToFloatTooBig", Randomness.get().doubles(MAX_DIMS_COUNT + 1, 0.0, 5.0).toArray()) + .field("mapsToDenseVector", Randomness.get().doubles(MIN_DIMS_FOR_DYNAMIC_FLOAT_MAPPING, 0.0, 5.0).toArray()) + .endObject() + ); + ParsedDocument parsedDocument = mapper.parse(new SourceToParse("id", source, builder.contentType())); + Mapping update = parsedDocument.dynamicMappingsUpdate(); + assertNotNull(update); + assertThat(((FieldMapper) update.getRoot().getMapper("mapsToFloatTooSmall")).fieldType().typeName(), equalTo("float")); + assertThat(((FieldMapper) update.getRoot().getMapper("mapsToFloatTooBig")).fieldType().typeName(), equalTo("float")); + assertThat(((FieldMapper) update.getRoot().getMapper("mapsToDenseVector")).fieldType().typeName(), equalTo("dense_vector")); + DenseVectorFieldMapper dvFieldMapper = ((DenseVectorFieldMapper) update.getRoot().getMapper("mapsToDenseVector")); + } + + public void testDefaultDenseVectorMappingsObject() throws IOException { + DocumentMapper mapper = createDocumentMapper(topMapping(b -> b.field("numeric_detection", true))); + BytesReference source = BytesReference.bytes( + XContentFactory.jsonBuilder() + .startObject() + .startObject("parent_object") + .field("mapsToFloatTooSmall", Randomness.get().doubles(MIN_DIMS_FOR_DYNAMIC_FLOAT_MAPPING - 1, 0.0, 5.0).toArray()) + .field("mapsToFloatTooBig", Randomness.get().doubles(MAX_DIMS_COUNT + 1, 0.0, 5.0).toArray()) + .field("mapsToDenseVector", Randomness.get().doubles(MIN_DIMS_FOR_DYNAMIC_FLOAT_MAPPING, 0.0, 5.0).toArray()) + .endObject() + .endObject() + ); + ParsedDocument parsedDocument = mapper.parse(new SourceToParse("id", source, XContentType.JSON)); + Mapping update = parsedDocument.dynamicMappingsUpdate(); + assertNotNull(update); + ObjectMapper parent = (ObjectMapper) update.getRoot().getMapper("parent_object"); + assertThat(((FieldMapper) parent.getMapper("mapsToDenseVector")).fieldType().typeName(), equalTo("dense_vector")); + } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java index 4e1c34d68e603..ddb81727fd399 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/MappingParserTests.java @@ -9,6 +9,7 @@ package 
org.elasticsearch.index.mapper; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.settings.Settings; @@ -319,8 +320,8 @@ public void testBlankFieldNameBefore8_6_0() throws Exception { IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.V_8_5_0); TransportVersion transportVersion = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.MINIMUM_COMPATIBLE, - TransportVersion.V_8_5_0 + TransportVersions.MINIMUM_COMPATIBLE, + TransportVersions.V_8_5_0 ); { XContentBuilder builder = mapping(b -> b.startObject(" ").field("type", randomFieldType()).endObject()); diff --git a/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java index 9b381abf8e47b..248d94cb67a63 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/ParametrizedMapperTests.java @@ -10,6 +10,7 @@ import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.lucene.Lucene; @@ -533,8 +534,8 @@ public void testBWCunknownParametersfromDynamicTemplates() { IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersion.V_8_0_0), TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_7_0_0, - TransportVersionUtils.getPreviousVersion(TransportVersion.V_8_0_0) + TransportVersions.V_7_0_0, + TransportVersionUtils.getPreviousVersion(TransportVersions.V_8_0_0) ), true ); @@ -548,7 +549,7 @@ public void testBWCunknownParametersfromDynamicTemplates() { MapperParsingException ex = expectThrows( MapperParsingException.class, - () -> fromMapping(mapping, IndexVersion.V_8_0_0, TransportVersion.V_8_0_0, true) + () -> fromMapping(mapping, IndexVersion.V_8_0_0, TransportVersions.V_8_0_0, true) ); assertEquals("unknown parameter [some_unknown_parameter] on mapper [field] of type [test_mapper]", ex.getMessage()); } @@ -585,7 +586,7 @@ public void testDeprecatedParameters() { // 'index' is declared explicitly, 'store' is not, but is one of the previously always-accepted params String mapping = """ {"type":"test_mapper","index":false,"store":true,"required":"value"}"""; - TestMapper mapper = fromMapping(mapping, IndexVersion.V_7_8_0, TransportVersion.V_7_8_0); + TestMapper mapper = fromMapping(mapping, IndexVersion.V_7_8_0, TransportVersions.V_7_8_0); assertWarnings("Parameter [store] has no effect on type [test_mapper] and will be removed in future"); assertFalse(mapper.index); assertEquals(""" @@ -593,7 +594,7 @@ public void testDeprecatedParameters() { MapperParsingException e = expectThrows( MapperParsingException.class, - () -> fromMapping(mapping, IndexVersion.V_8_0_0, TransportVersion.V_8_0_0) + () -> fromMapping(mapping, IndexVersion.V_8_0_0, TransportVersions.V_8_0_0) ); assertEquals("unknown parameter [store] on mapper [field] of type [test_mapper]", e.getMessage()); } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java 
index 31d39cc167b94..5eab2951d9e2d 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/TypeParsersTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.index.mapper; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; @@ -90,7 +91,7 @@ public void testMultiFieldWithinMultiField() throws IOException { type -> typeParser, type -> null, olderVersion, - () -> TransportVersion.MINIMUM_COMPATIBLE, + () -> TransportVersions.MINIMUM_COMPATIBLE, null, ScriptCompiler.NONE, mapperService.getIndexAnalyzers(), @@ -113,7 +114,7 @@ public void testMultiFieldWithinMultiField() throws IOException { IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersion.V_8_0_0, IndexVersion.current()); TransportVersion transportVersion = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_8_0_0, + TransportVersions.V_8_0_0, TransportVersion.current() ); MappingParserContext context = new MappingParserContext( diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java index 9fc812028609b..726ce0aa043c1 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldMapperTests.java @@ -230,13 +230,6 @@ public void testDims() { ) ); } - { - Exception e = expectThrows( - MapperParsingException.class, - () -> createMapperService(fieldMapping(b -> b.field("type", "dense_vector"))) - ); - assertThat(e.getMessage(), equalTo("Failed to parse mapping: Missing required parameter [dims] for field [field]")); - } } public void testDefaults() throws Exception { @@ -327,7 +320,7 @@ public void testDotProductWithInvalidNorm() throws Exception { b -> b.field("type", "dense_vector").field("dims", 3).field("index", true).field("similarity", VectorSimilarity.DOT_PRODUCT) ) ); - float[] vector = { -12.1f, 2.7f, -4 }; + float[] vector = { 0f, 0f, 0f }; DocumentParsingException e = expectThrows( DocumentParsingException.class, () -> mapper.parse(source(b -> b.array("field", vector))) @@ -336,23 +329,7 @@ public void testDotProductWithInvalidNorm() throws Exception { assertThat( e.getCause().getMessage(), containsString( - "The [dot_product] similarity can only be used with unit-length vectors. Preview of invalid vector: [-12.1, 2.7, -4.0]" - ) - ); - - DocumentMapper mapperWithLargerDim = createDocumentMapper( - fieldMapping( - b -> b.field("type", "dense_vector").field("dims", 6).field("index", true).field("similarity", VectorSimilarity.DOT_PRODUCT) - ) - ); - float[] largerVector = { -12.1f, 2.7f, -4, 1.05f, 10.0f, 29.9f }; - e = expectThrows(DocumentParsingException.class, () -> mapperWithLargerDim.parse(source(b -> b.array("field", largerVector)))); - assertNotNull(e.getCause()); - assertThat( - e.getCause().getMessage(), - containsString( - "The [dot_product] similarity can only be used with unit-length vectors. " - + "Preview of invalid vector: [-12.1, 2.7, -4.0, 1.05, 10.0, ...]" + "The [dot_product] similarity does not support vectors with zero magnitude. 
Preview of invalid vector: [0.0, 0.0, 0.0]" ) ); } @@ -522,7 +499,7 @@ public void testDefaultParamsBeforeIndexByDefault() throws Exception { assertNull(denseVectorFieldType.getSimilarity()); } - public void testtParamsBeforeIndexByDefault() throws Exception { + public void testParamsBeforeIndexByDefault() throws Exception { DocumentMapper documentMapper = createDocumentMapper(INDEXED_BY_DEFAULT_PREVIOUS_INDEX_VERSION, fieldMapping(b -> { b.field("type", "dense_vector").field("dims", 3).field("index", true).field("similarity", "dot_product"); })); @@ -998,6 +975,8 @@ private void mapping(XContentBuilder b) throws IOException { b.field("ef_construction", 50); b.endObject(); } + } else { + b.field("index", false); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java index d22056d49beb5..448d6aff0f4e8 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/DenseVectorFieldTypeTests.java @@ -137,9 +137,9 @@ public void testFloatCreateKnnQuery() { ); e = expectThrows( IllegalArgumentException.class, - () -> dotProductField.createKnnQuery(new float[] { 0.3f, 0.1f, 1.0f }, 10, null, null) + () -> dotProductField.createKnnQuery(new float[] { 0.0f, 0.0f, 0.0f }, 10, null, null) ); - assertThat(e.getMessage(), containsString("The [dot_product] similarity can only be used with unit-length vectors.")); + assertThat(e.getMessage(), containsString("The [dot_product] similarity does not support vectors with zero magnitude.")); DenseVectorFieldType cosineField = new DenseVectorFieldType( "f", diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java index 9ad906c31c74a..9ea63325ef3ad 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldMapperTests.java @@ -8,93 +8,218 @@ package org.elasticsearch.index.mapper.vectors; -import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.apache.lucene.analysis.TokenStream; +import org.apache.lucene.analysis.tokenattributes.TermFrequencyAttribute; +import org.apache.lucene.document.FeatureField; +import org.apache.lucene.index.IndexableField; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.compress.CompressedXContent; -import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.index.IndexService; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.mapper.DocumentMapper; import org.elasticsearch.index.mapper.DocumentParsingException; import org.elasticsearch.index.mapper.MappedFieldType; -import org.elasticsearch.index.mapper.MapperBuilderContext; import org.elasticsearch.index.mapper.MapperParsingException; import org.elasticsearch.index.mapper.MapperService; +import org.elasticsearch.index.mapper.MapperTestCase; +import org.elasticsearch.index.mapper.ParsedDocument; import org.elasticsearch.index.mapper.SourceToParse; -import org.elasticsearch.test.ESSingleNodeTestCase; import org.elasticsearch.test.index.IndexVersionUtils; +import org.elasticsearch.xcontent.XContentBuilder; import 
org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentType; +import org.hamcrest.Matchers; +import org.junit.AssumptionViolatedException; +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.index.mapper.vectors.SparseVectorFieldMapper.NEW_SPARSE_VECTOR_INDEX_VERSION; +import static org.elasticsearch.index.mapper.vectors.SparseVectorFieldMapper.PREVIOUS_SPARSE_VECTOR_INDEX_VERSION; import static org.hamcrest.Matchers.containsString; -public class SparseVectorFieldMapperTests extends ESSingleNodeTestCase { +public class SparseVectorFieldMapperTests extends MapperTestCase { + + @Override + protected Object getSampleValueForDocument() { + return Map.of("ten", 10, "twenty", 20); + } + + @Override + protected Object getSampleObjectForDocument() { + return getSampleValueForDocument(); + } + + @Override + protected void assertExistsQuery(MapperService mapperService) { + IllegalArgumentException iae = expectThrows(IllegalArgumentException.class, () -> super.assertExistsQuery(mapperService)); + assertEquals("[sparse_vector] fields do not support [exists] queries", iae.getMessage()); + } - // this allows to set indexVersion as it is a private setting @Override - protected boolean forbidPrivateIndexSettings() { + protected void minimalMapping(XContentBuilder b) throws IOException { + b.field("type", "sparse_vector"); + } + + @Override + protected boolean supportsStoredFields() { return false; } - public void testValueFetcherIsNotSupported() { - SparseVectorFieldMapper.Builder builder = new SparseVectorFieldMapper.Builder("field"); - MappedFieldType fieldMapper = builder.build(MapperBuilderContext.root(false)).fieldType(); - UnsupportedOperationException exc = expectThrows(UnsupportedOperationException.class, () -> fieldMapper.valueFetcher(null, null)); - assertEquals(SparseVectorFieldMapper.ERROR_MESSAGE_7X, exc.getMessage()); + @Override + protected boolean supportsIgnoreMalformed() { + return false; } - public void testSparseVectorWith8xIndex() throws Exception { - IndexVersion version = IndexVersionUtils.randomVersionBetween(random(), IndexVersion.V_8_0_0, IndexVersion.current()); - Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); + @Override + protected void registerParameters(ParameterChecker checker) throws IOException {} - IndexService indexService = createIndex("index", settings); - MapperService mapperService = indexService.mapperService(); + @Override + protected boolean supportsMeta() { + return false; + } - BytesReference mapping = BytesReference.bytes( - XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("my-vector") - .field("type", "sparse_vector") - .endObject() - .endObject() - .endObject() - .endObject() - ); + private static int getFrequency(TokenStream tk) throws IOException { + TermFrequencyAttribute freqAttribute = tk.addAttribute(TermFrequencyAttribute.class); + tk.reset(); + assertTrue(tk.incrementToken()); + int freq = freqAttribute.getTermFrequency(); + assertFalse(tk.incrementToken()); + return freq; + } - MapperParsingException e = expectThrows( - MapperParsingException.class, - () -> mapperService.parseMapping(MapperService.SINGLE_MAPPING_NAME, new CompressedXContent(mapping)) - ); - assertThat(e.getMessage(), containsString(SparseVectorFieldMapper.ERROR_MESSAGE)); + public void testDefaults() throws Exception { + DocumentMapper mapper = 
createDocumentMapper(fieldMapping(this::minimalMapping)); + assertEquals(Strings.toString(fieldMapping(this::minimalMapping)), mapper.mappingSource().toString()); + + ParsedDocument doc1 = mapper.parse(source(this::writeField)); + + List fields = doc1.rootDoc().getFields("field"); + assertEquals(2, fields.size()); + assertThat(fields.get(0), Matchers.instanceOf(FeatureField.class)); + FeatureField featureField1 = null; + FeatureField featureField2 = null; + for (IndexableField field : fields) { + if (field.stringValue().equals("ten")) { + featureField1 = (FeatureField) field; + } else if (field.stringValue().equals("twenty")) { + featureField2 = (FeatureField) field; + } else { + throw new UnsupportedOperationException(); + } + } + + int freq1 = getFrequency(featureField1.tokenStream(null, null)); + int freq2 = getFrequency(featureField2.tokenStream(null, null)); + assertTrue(freq1 < freq2); } - public void testSparseVectorWith7xIndex() throws Exception { - IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), IndexVersion.V_8_0_0); - Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, version).build(); + public void testDotInFieldName() throws Exception { + DocumentMapper mapper = createDocumentMapper(fieldMapping(this::minimalMapping)); + DocumentParsingException ex = expectThrows( + DocumentParsingException.class, + () -> mapper.parse(source(b -> b.field("field", Map.of("politi.cs", 10, "sports", 20)))) + ); + assertThat(ex.getCause().getMessage(), containsString("do not support dots in feature names")); + assertThat(ex.getCause().getMessage(), containsString("politi.cs")); + } - IndexService indexService = createIndex("index", settings); - MapperService mapperService = indexService.mapperService(); + public void testRejectMultiValuedFields() throws MapperParsingException, IOException { + DocumentMapper mapper = createDocumentMapper(mapping(b -> { + b.startObject("field").field("type", "sparse_vector").endObject(); + b.startObject("foo").startObject("properties"); + { + b.startObject("field").field("type", "sparse_vector").endObject(); + } + b.endObject().endObject(); + })); - BytesReference mapping = BytesReference.bytes( - XContentFactory.jsonBuilder() - .startObject() - .startObject("_doc") - .startObject("properties") - .startObject("my-vector") - .field("type", "sparse_vector") - .endObject() - .endObject() - .endObject() - .endObject() + DocumentParsingException e = expectThrows( + DocumentParsingException.class, + () -> mapper.parse(source(b -> b.startObject("field").field("foo", Arrays.asList(10, 20)).endObject())) + ); + assertEquals( + "[sparse_vector] fields take hashes that map a feature to a strictly positive float, but got unexpected token " + "START_ARRAY", + e.getCause().getMessage() ); - DocumentMapper mapper = mapperService.merge( - MapperService.SINGLE_MAPPING_NAME, - new CompressedXContent(mapping), - MapperService.MergeReason.MAPPING_UPDATE + e = expectThrows(DocumentParsingException.class, () -> mapper.parse(source(b -> { + b.startArray("foo"); + { + b.startObject().startObject("field").field("bar", 10).endObject().endObject(); + b.startObject().startObject("field").field("bar", 20).endObject().endObject(); + } + b.endArray(); + }))); + assertEquals( + "[sparse_vector] fields do not support indexing multiple values for the same feature [foo.field.bar] in " + "the same document", + e.getCause().getMessage() ); + } + + public void testCannotBeUsedInMultiFields() { + Exception e = 
expectThrows(MapperParsingException.class, () -> createMapperService(fieldMapping(b -> { + b.field("type", "keyword"); + b.startObject("fields"); + b.startObject("feature"); + b.field("type", "sparse_vector"); + b.endObject(); + b.endObject(); + }))); + assertThat(e.getMessage(), containsString("Field [feature] of type [sparse_vector] can't be used in multifields")); + } + + @Override + protected Object generateRandomInputValue(MappedFieldType ft) { + assumeFalse("Test implemented in a follow up", true); + return null; + } + + @Override + protected boolean allowsNullValues() { + return false; // TODO should this allow null values? + } + + @Override + protected SyntheticSourceSupport syntheticSourceSupport(boolean syntheticSource) { + throw new AssumptionViolatedException("not supported"); + } + + @Override + protected IngestScriptSupport ingestScriptSupport() { + throw new AssumptionViolatedException("not supported"); + } + + @Override + protected String[] getParseMinimalWarnings(IndexVersion indexVersion) { + String[] additionalWarnings = null; + if (indexVersion.before(PREVIOUS_SPARSE_VECTOR_INDEX_VERSION)) { + additionalWarnings = new String[] { SparseVectorFieldMapper.ERROR_MESSAGE_7X }; + } + return Strings.concatStringArrays(super.getParseMinimalWarnings(indexVersion), additionalWarnings); + } + + @Override + protected IndexVersion boostNotAllowedIndexVersion() { + return NEW_SPARSE_VECTOR_INDEX_VERSION; + } + + public void testSparseVectorWith7xIndex() throws Exception { + IndexVersion version = IndexVersionUtils.randomPreviousCompatibleVersion(random(), PREVIOUS_SPARSE_VECTOR_INDEX_VERSION); + + XContentBuilder builder = XContentFactory.jsonBuilder() + .startObject() + .startObject("_doc") + .startObject("properties") + .startObject("my-vector") + .field("type", "sparse_vector") + .endObject() + .endObject() + .endObject() + .endObject(); + + DocumentMapper mapper = createDocumentMapper(version, builder); assertWarnings(SparseVectorFieldMapper.ERROR_MESSAGE_7X); // Check that new vectors cannot be indexed. 
@@ -115,6 +240,18 @@ public void testSparseVectorWith7xIndex() throws Exception { DocumentParsingException.class, () -> mapper.parse(new SourceToParse("id", source, XContentType.JSON)) ); - assertThat(indexException.getCause().getMessage(), containsString(SparseVectorFieldMapper.ERROR_MESSAGE)); + assertThat(indexException.getCause().getMessage(), containsString(SparseVectorFieldMapper.ERROR_MESSAGE_7X)); + } + + public void testSparseVectorUnsupportedIndex() throws Exception { + IndexVersion version = IndexVersionUtils.randomVersionBetween( + random(), + PREVIOUS_SPARSE_VECTOR_INDEX_VERSION, + IndexVersion.V_8_500_000 + ); + Exception e = expectThrows(MapperParsingException.class, () -> createMapperService(version, fieldMapping(b -> { + b.field("type", "sparse_vector"); + }))); + assertThat(e.getMessage(), containsString(SparseVectorFieldMapper.ERROR_MESSAGE_8X)); } } diff --git a/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldTypeTests.java b/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldTypeTests.java index 574fb63cd3fb0..1575d71110c42 100644 --- a/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldTypeTests.java +++ b/server/src/test/java/org/elasticsearch/index/mapper/vectors/SparseVectorFieldTypeTests.java @@ -26,22 +26,4 @@ public void testIsNotAggregatable() { MappedFieldType fieldType = new SparseVectorFieldMapper.SparseVectorFieldType("field", Collections.emptyMap()); assertFalse(fieldType.isAggregatable()); } - - public void testDocValueFormatIsNotSupported() { - MappedFieldType fieldType = new SparseVectorFieldMapper.SparseVectorFieldType("field", Collections.emptyMap()); - UnsupportedOperationException exc = expectThrows(UnsupportedOperationException.class, () -> fieldType.docValueFormat(null, null)); - assertEquals(SparseVectorFieldMapper.ERROR_MESSAGE_7X, exc.getMessage()); - } - - public void testExistsQueryIsNotSupported() { - MappedFieldType fieldType = new SparseVectorFieldMapper.SparseVectorFieldType("field", Collections.emptyMap()); - UnsupportedOperationException exc = expectThrows(UnsupportedOperationException.class, () -> fieldType.existsQuery(null)); - assertEquals(SparseVectorFieldMapper.ERROR_MESSAGE_7X, exc.getMessage()); - } - - public void testTermQueryIsNotSupported() { - MappedFieldType fieldType = new SparseVectorFieldMapper.SparseVectorFieldType("field", Collections.emptyMap()); - UnsupportedOperationException exc = expectThrows(UnsupportedOperationException.class, () -> fieldType.termQuery(null, null)); - assertEquals(SparseVectorFieldMapper.ERROR_MESSAGE_7X, exc.getMessage()); - } } diff --git a/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java b/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java index 0e5f924ca8753..1257fa43f3216 100644 --- a/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/SpanMultiTermQueryBuilderTests.java @@ -28,6 +28,7 @@ import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.StreamOutput; @@ -167,7 +168,7 @@ public String fieldName() { @Override public TransportVersion getMinimalSupportedVersion() { - return 
TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java index ffda772b1a152..20d5fdae5e4cf 100644 --- a/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/query/functionscore/FunctionScoreTests.java @@ -697,7 +697,7 @@ public void testPropagatesApproximations() throws IOException { FunctionScoreQuery fsq = new FunctionScoreQuery(query, null, Float.POSITIVE_INFINITY); for (org.apache.lucene.search.ScoreMode scoreMode : org.apache.lucene.search.ScoreMode.values()) { Weight weight = searcher.createWeight(fsq, scoreMode, 1f); - Scorer scorer = weight.scorer(reader.leaves().get(0)); + Scorer scorer = weight.scorer(searcher.getIndexReader().leaves().get(0)); assertNotNull(scorer.twoPhaseIterator()); } } diff --git a/server/src/test/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskStateTests.java b/server/src/test/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskStateTests.java index f7e281c1db60e..3c427649b5200 100644 --- a/server/src/test/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskStateTests.java +++ b/server/src/test/java/org/elasticsearch/index/reindex/WorkerBulkByScrollTaskStateTests.java @@ -22,6 +22,7 @@ import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.Delayed; +import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -134,7 +135,7 @@ public void testDelayAndRethrottle() throws IOException, InterruptedException { int batchSizeForMaxDelay = (int) (maxDelay.seconds() * originalRequestsPerSecond); ThreadPool threadPool = new TestThreadPool(getTestName()) { @Override - public ScheduledCancellable schedule(Runnable command, TimeValue delay, String name) { + public ScheduledCancellable schedule(Runnable command, TimeValue delay, Executor name) { assertThat(delay.nanos(), both(greaterThanOrEqualTo(0L)).and(lessThanOrEqualTo(maxDelay.nanos()))); return super.schedule(command, delay, name); } @@ -185,7 +186,7 @@ public void onFailure(Exception e) { public void testDelayNeverNegative() throws IOException { // Thread pool that returns a ScheduledFuture that claims to have a negative delay ThreadPool threadPool = new TestThreadPool("test") { - public ScheduledCancellable schedule(Runnable command, TimeValue delay, String name) { + public ScheduledCancellable schedule(Runnable command, TimeValue delay, Executor name) { return new ScheduledCancellable() { @Override public long getDelay(TimeUnit unit) { diff --git a/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java b/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java index 653b00c15b313..97e58a4a16f23 100644 --- a/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java +++ b/server/src/test/java/org/elasticsearch/index/search/nested/NestedSortingTests.java @@ -99,7 +99,7 @@ public void testDuel() throws Exception { MultiValueMode sortMode = randomFrom(Arrays.asList(MultiValueMode.MIN, MultiValueMode.MAX)); DirectoryReader reader = DirectoryReader.open(writer); reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId(indexService.index(), 0)); - IndexSearcher searcher = newSearcher(reader); + 
IndexSearcher searcher = newSearcher(reader, false); PagedBytesIndexFieldData indexFieldData1 = getForField("f"); IndexFieldData indexFieldData2 = NoOrdinalsStringFieldDataTests.hideOrdinals(indexFieldData1); final String missingValue = randomBoolean() ? null : TestUtil.randomSimpleString(random(), 2); @@ -291,7 +291,7 @@ public void testNestedSorting() throws Exception { MultiValueMode sortMode = MultiValueMode.MIN; DirectoryReader reader = DirectoryReader.open(writer); reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId(indexService.index(), 0)); - IndexSearcher searcher = newSearcher(reader); + IndexSearcher searcher = newSearcher(reader, false); PagedBytesIndexFieldData indexFieldData = getForField("field2"); Query parentFilter = new TermQuery(new Term("_nested_path", "parent")); Query childFilter = Queries.not(parentFilter); @@ -612,7 +612,7 @@ public void testMultiLevelNestedSorting() throws IOException { } DirectoryReader reader = DirectoryReader.open(writer); reader = ElasticsearchDirectoryReader.wrap(reader, new ShardId(indexService.index(), 0)); - IndexSearcher searcher = newSearcher(reader); + IndexSearcher searcher = newSearcher(reader, false); SearchExecutionContext searchExecutionContext = indexService.newSearchExecutionContext(0, 0, searcher, () -> 0L, null, emptyMap()); FieldSortBuilder sortBuilder = new FieldSortBuilder("chapters.paragraphs.word_count"); diff --git a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java index ccd4d9b77010b..a3061df8839fb 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java @@ -1748,6 +1748,9 @@ public void testRefreshMetric() throws IOException { } indexDoc(shard, "_doc", "test"); shard.writeIndexingBuffer(); + // This did not actually run a refresh, it called IndexWriter#flushNextBuffer() + assertThat(shard.refreshStats().getTotal(), equalTo(refreshCount + 1)); + shard.refresh("force"); assertThat(shard.refreshStats().getTotal(), equalTo(refreshCount + 2)); closeShards(shard); } @@ -1772,9 +1775,10 @@ public void testExternalRefreshMetric() throws IOException { assertThat(shard.refreshStats().getExternalTotal(), equalTo(shard.refreshStats().getTotal() - 1 - extraInternalRefreshes)); } indexDoc(shard, "_doc", "test"); + // This runs IndexWriter#flushNextBuffer internally shard.writeIndexingBuffer(); assertThat(shard.refreshStats().getExternalTotal(), equalTo(externalRefreshCount)); - assertThat(shard.refreshStats().getExternalTotal(), equalTo(shard.refreshStats().getTotal() - 2 - extraInternalRefreshes)); + assertThat(shard.refreshStats().getExternalTotal(), equalTo(shard.refreshStats().getTotal() - 1 - extraInternalRefreshes)); closeShards(shard); } diff --git a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java index 62d32af5877af..32a4423cac3ed 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/RefreshListenersTests.java @@ -24,6 +24,7 @@ import org.elasticsearch.common.metrics.MeanMetric; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import 
org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Nullable; @@ -56,7 +57,6 @@ import org.elasticsearch.threadpool.Scheduler.Cancellable; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.xcontent.XContentType; import org.junit.After; import org.junit.Before; @@ -398,7 +398,11 @@ public void testLotsOfThreads() throws Exception { maxListeners = between(1, threadCount * 2); // This thread just refreshes every once in a while to cause trouble. - Cancellable refresher = threadPool.scheduleWithFixedDelay(() -> engine.refresh("because test"), timeValueMillis(100), Names.SAME); + Cancellable refresher = threadPool.scheduleWithFixedDelay( + () -> engine.refresh("because test"), + timeValueMillis(100), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); // These threads add and block until the refresh makes the change visible and then do a non-realtime get. Thread[] indexers = new Thread[threadCount]; diff --git a/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java b/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java index ad3b6d46a6f51..851ad18500add 100644 --- a/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java +++ b/server/src/test/java/org/elasticsearch/index/shard/ShardSplittingQueryTests.java @@ -155,7 +155,7 @@ void assertSplit(Directory dir, IndexMetadata metadata, int targetShardId, boole ScoreMode.COMPLETE_NO_SCORES, 1f ); - final List leaves = reader.leaves(); + final List leaves = searcher.getIndexReader().leaves(); for (final LeafReaderContext ctx : leaves) { Scorer scorer = splitWeight.scorer(ctx); DocIdSetIterator iterator = scorer.iterator(); diff --git a/server/src/test/java/org/elasticsearch/index/similarity/ScriptedSimilarityTests.java b/server/src/test/java/org/elasticsearch/index/similarity/ScriptedSimilarityTests.java index 9c42aabddcc3a..3bb6265e2c94d 100644 --- a/server/src/test/java/org/elasticsearch/index/similarity/ScriptedSimilarityTests.java +++ b/server/src/test/java/org/elasticsearch/index/similarity/ScriptedSimilarityTests.java @@ -146,6 +146,7 @@ public double execute( assertEquals(1, topDocs.totalHits.value); assertTrue(called.get()); assertEquals(42, topDocs.scoreDocs[0].score, 0); + r.close(); w.close(); dir.close(); } @@ -238,6 +239,7 @@ public double execute( assertTrue(initCalled.get()); assertTrue(called.get()); assertEquals(42, topDocs.scoreDocs[0].score, 0); + r.close(); w.close(); dir.close(); } diff --git a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java index 42a98a6c2f610..2f1bdfd82c1c1 100644 --- a/server/src/test/java/org/elasticsearch/index/store/StoreTests.java +++ b/server/src/test/java/org/elasticsearch/index/store/StoreTests.java @@ -109,7 +109,7 @@ public class StoreTests extends ESTestCase { private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings( "index", - Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT).build() + Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).build() ); private static final Version MIN_SUPPORTED_LUCENE_VERSION = IndexVersion.MINIMUM_COMPATIBLE.luceneVersion(); @@ -757,7 +757,7 @@ public void testOnCloseCallback() throws IOException { public void testStoreStats() throws IOException { final ShardId 
shardId = new ShardId("index", "_na_", 1); Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMinutes(0)) .build(); Store store = new Store( @@ -860,7 +860,7 @@ public void writeBytes(byte[] b, int offset, int length) throws IOException { IndexSettingsModule.newIndexSettings( "index", Settings.builder() - .put(IndexMetadata.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .put(Store.INDEX_STORE_STATS_REFRESH_INTERVAL_SETTING.getKey(), TimeValue.timeValueMinutes(0)) .build() ), diff --git a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java index 4c3ff1238bd06..627ff0f63d2c7 100644 --- a/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java +++ b/server/src/test/java/org/elasticsearch/index/translog/TranslogTests.java @@ -26,6 +26,7 @@ import org.apache.lucene.tests.util.LineFileDocs; import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; @@ -48,6 +49,7 @@ import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.VersionType; import org.elasticsearch.index.engine.Engine; import org.elasticsearch.index.engine.Engine.Operation.Origin; @@ -272,7 +274,7 @@ private Translog create(Path path) throws IOException { } private TranslogConfig getTranslogConfig(final Path path) { - final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT).build(); + final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).build(); return getTranslogConfig(path, settings); } @@ -1627,7 +1629,7 @@ public void testTranslogWriterLastModifiedTime() throws IOException { public void testTranslogOperationListener() throws IOException { Path tempDir = createTempDir(); - final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT).build(); + final Settings settings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).build(); final ArrayList seqNos = new ArrayList<>(); final ArrayList locations = new ArrayList<>(); @@ -3401,7 +3403,7 @@ public void testTranslogOpSerialization() throws Exception { TransportVersion wireVersion = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.MINIMUM_COMPATIBLE, + TransportVersions.MINIMUM_COMPATIBLE, TransportVersion.current() ); BytesStreamOutput out = new BytesStreamOutput(); diff --git a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java index 0ffda645f7a48..0c21e80290bd3 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndexingMemoryControllerTests.java @@ -7,20 +7,16 @@ */ 
package org.elasticsearch.indices; -import org.apache.lucene.search.ReferenceManager; -import org.apache.lucene.util.SetOnce; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.common.util.BigArrays; -import org.elasticsearch.index.codec.CodecService; -import org.elasticsearch.index.engine.EngineConfig; import org.elasticsearch.index.engine.InternalEngine; -import org.elasticsearch.index.refresh.RefreshStats; +import org.elasticsearch.index.engine.InternalEngineFactory; import org.elasticsearch.index.shard.IndexShard; import org.elasticsearch.index.shard.IndexShardTestCase; +import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.recovery.RecoveryState; import org.elasticsearch.threadpool.Scheduler.Cancellable; import org.elasticsearch.threadpool.ThreadPool; @@ -42,6 +38,7 @@ import static java.util.Collections.emptySet; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.lessThan; public class IndexingMemoryControllerTests extends IndexShardTestCase { @@ -96,7 +93,7 @@ protected long getShardWritingBytes(IndexShard shard) { protected void checkIdle(IndexShard shard, long inactiveTimeNS) {} @Override - public void writeIndexingBufferAsync(IndexShard shard) { + public void enqueueWriteIndexingBuffer(IndexShard shard) { long bytes = indexBufferRAMBytesUsed.put(shard, 0L); writingBytes.put(shard, writingBytes.get(shard) + bytes); indexBufferRAMBytesUsed.put(shard, 0L); @@ -162,12 +159,18 @@ protected Cancellable scheduleTask(ThreadPool threadPool) { public void testShardAdditionAndRemoval() throws IOException { MockController controller = new MockController(Settings.builder().put("indices.memory.index_buffer_size", "4mb").build()); - IndexShard shard0 = newStartedShard(); + IndexShard shard0 = newStartedShard( + p -> newShard(p, new ShardId("index0", "uuid0", 0), Settings.EMPTY, new InternalEngineFactory()), + randomBoolean() + ); controller.simulateIndexing(shard0); controller.assertBuffer(shard0, 1); // add another shard - IndexShard shard1 = newStartedShard(); + IndexShard shard1 = newStartedShard( + p -> newShard(p, new ShardId("index1", "uuid1", 0), Settings.EMPTY, new InternalEngineFactory()), + randomBoolean() + ); controller.simulateIndexing(shard1); controller.assertBuffer(shard0, 1); controller.assertBuffer(shard1, 1); @@ -192,9 +195,15 @@ public void testActiveInactive() throws IOException { MockController controller = new MockController(Settings.builder().put("indices.memory.index_buffer_size", "5mb").build()); - IndexShard shard0 = newStartedShard(); + IndexShard shard0 = newStartedShard( + p -> newShard(p, new ShardId("index0", "uuid0", 0), Settings.EMPTY, new InternalEngineFactory()), + randomBoolean() + ); controller.simulateIndexing(shard0); - IndexShard shard1 = newStartedShard(); + IndexShard shard1 = newStartedShard( + p -> newShard(p, new ShardId("index1", "uuid1", 0), Settings.EMPTY, new InternalEngineFactory()), + randomBoolean() + ); controller.simulateIndexing(shard1); controller.assertBuffer(shard0, 1); @@ -206,7 +215,7 @@ public void testActiveInactive() throws IOException { controller.assertBuffer(shard0, 2); controller.assertBuffer(shard1, 2); - // index into one shard only, crosses the 5mb limit, so shard1 is refreshed 
+        // index into one shard only, crosses the 5mb limit, so shard0 is refreshed
         controller.simulateIndexing(shard0);
         controller.simulateIndexing(shard0);
         controller.assertBuffer(shard0, 0);
@@ -285,28 +294,37 @@ public void testMaxBufferSizes() {

     public void testThrottling() throws Exception {
         MockController controller = new MockController(Settings.builder().put("indices.memory.index_buffer_size", "4mb").build());
-        IndexShard shard0 = newStartedShard();
-        IndexShard shard1 = newStartedShard();
-        controller.simulateIndexing(shard0);
+        IndexShard shard0 = newStartedShard(
+            p -> newShard(p, new ShardId("index0", "uuid0", 0), Settings.EMPTY, new InternalEngineFactory()),
+            randomBoolean()
+        );
+        IndexShard shard1 = newStartedShard(
+            p -> newShard(p, new ShardId("index1", "uuid1", 0), Settings.EMPTY, new InternalEngineFactory()),
+            randomBoolean()
+        );
+
+        assertThat(shard0.routingEntry().shardId(), lessThan(shard1.routingEntry().shardId()));
+
         controller.simulateIndexing(shard0);
         controller.simulateIndexing(shard0);
-        controller.assertBuffer(shard0, 3);
+        controller.assertBuffer(shard0, 2);
+
+        controller.simulateIndexing(shard1);
         controller.simulateIndexing(shard1);
         controller.simulateIndexing(shard1);
-        // We are now using 5 MB, so we should be writing shard0 since it's using the most heap:
-        controller.assertWriting(shard0, 3);
+        // We are now using 5 MB, so we should be writing shard0 since shards get flushed by increasing shard id, even though shard1 uses
+        // more RAM buffer
+        controller.assertWriting(shard0, 2);
         controller.assertWriting(shard1, 0);
         controller.assertBuffer(shard0, 0);
-        controller.assertBuffer(shard1, 2);
+        controller.assertBuffer(shard1, 3);

         controller.simulateIndexing(shard0);
         controller.simulateIndexing(shard1);
-        controller.simulateIndexing(shard1);
-        // Now we are still writing 3 MB (shard0), and using 5 MB index buffers, so we should now 1) be writing shard1,
-        // and 2) be throttling shard1:
-        controller.assertWriting(shard0, 3);
+        // We crossed the limit again, so now we should be writing the next shard after shard0: shard1. And since bytes are still being
+        // written and haven't been released yet, we should be throttling the same shard we flushed: shard1.
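The reworked assertions encode the controller's new selection policy: once the combined indexing buffer crosses the budget, shards are told to write their buffers in ascending shard-id order rather than largest-buffer-first, and the shard whose flushed bytes are still in flight is the one that gets throttled. A rough sketch of that loop, where enqueueWriteIndexingBuffer is the hook this test overrides, while the other helpers (totalIndexingBufferBytes, indexBufferBytes, stillWriting, activateThrottling) are hypothetical stand-ins for the controller's bookkeeping:

    void writeIndexingBuffers(List<IndexShard> activeShards, long indexingBudgetBytes) {
        List<IndexShard> byShardId = new ArrayList<>(activeShards);
        byShardId.sort(Comparator.comparing(shard -> shard.routingEntry().shardId()));
        long usedBytes = totalIndexingBufferBytes();
        for (IndexShard shard : byShardId) {
            if (usedBytes <= indexingBudgetBytes) {
                break; // back under budget, stop flushing
            }
            usedBytes -= indexBufferBytes(shard);
            enqueueWriteIndexingBuffer(shard); // asynchronous: bytes stay "writing" until done
            if (stillWriting(shard)) {
                activateThrottling(shard); // throttle the shard that was just flushed
            }
        }
    }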
+ controller.assertWriting(shard0, 2); controller.assertWriting(shard1, 4); controller.assertBuffer(shard0, 1); controller.assertBuffer(shard1, 0); @@ -323,7 +341,7 @@ public void testThrottling() throws Exception { controller.simulateIndexing(shard0); // Now we are using 5 MB again, so shard0 should also be writing and now also be throttled: - controller.assertWriting(shard0, 8); + controller.assertWriting(shard0, 7); controller.assertWriting(shard1, 4); controller.assertBuffer(shard0, 0); controller.assertBuffer(shard1, 0); @@ -354,7 +372,7 @@ public void testTranslogRecoveryWorksWithIMC() throws IOException { AtomicInteger flushes = new AtomicInteger(); IndexingMemoryController imc = new IndexingMemoryController(settings, threadPool, iterable) { @Override - protected void writeIndexingBufferAsync(IndexShard shard) { + protected void enqueueWriteIndexingBuffer(IndexShard shard) { assertEquals(shard, shardRef.get()); flushes.incrementAndGet(); shard.writeIndexingBuffer(); @@ -372,40 +390,6 @@ protected void writeIndexingBufferAsync(IndexShard shard) { closeShards(shard); } - EngineConfig configWithRefreshListener(EngineConfig config, ReferenceManager.RefreshListener listener) { - final List internalRefreshListener = new ArrayList<>(config.getInternalRefreshListener()); - ; - internalRefreshListener.add(listener); - return new EngineConfig( - config.getShardId(), - config.getThreadPool(), - config.getIndexSettings(), - config.getWarmer(), - config.getStore(), - config.getMergePolicy(), - config.getAnalyzer(), - config.getSimilarity(), - new CodecService(null, BigArrays.NON_RECYCLING_INSTANCE), - config.getEventListener(), - config.getQueryCache(), - config.getQueryCachingPolicy(), - config.getTranslogConfig(), - config.getFlushMergesAfter(), - config.getExternalRefreshListener(), - internalRefreshListener, - config.getIndexSort(), - config.getCircuitBreakerService(), - config.getGlobalCheckpointSupplier(), - config.retentionLeasesSupplier(), - config.getPrimaryTermSupplier(), - config.getSnapshotCommitSupplier(), - config.getLeafSorter(), - config.getRelativeTimeInNanosSupplier(), - config.getIndexCommitListener(), - config.isPromotableToPrimary() - ); - } - ThreadPoolStats.Stats getRefreshThreadPoolStats() { final ThreadPoolStats stats = threadPool.stats(); for (ThreadPoolStats.Stats s : stats) { @@ -416,32 +400,19 @@ ThreadPoolStats.Stats getRefreshThreadPoolStats() { throw new AssertionError("refresh thread pool stats not found [" + stats + "]"); } - public void testSkipRefreshIfShardIsRefreshingAlready() throws Exception { - SetOnce refreshLatch = new SetOnce<>(); - ReferenceManager.RefreshListener refreshListener = new ReferenceManager.RefreshListener() { + public void testSkipIfPendingAlready() throws Exception { + final CountDownLatch latch = new CountDownLatch(1); + IndexShard shard = newStartedShard(randomBoolean(), Settings.EMPTY, config -> new InternalEngine(config) { @Override - public void beforeRefresh() { - if (refreshLatch.get() != null) { - try { - refreshLatch.get().await(); - } catch (InterruptedException e) { - throw new AssertionError(e); - } + public void writeIndexingBuffer() throws IOException { + try { + latch.await(); + } catch (InterruptedException e) { + throw new AssertionError(e); } + super.writeIndexingBuffer(); } - - @Override - public void afterRefresh(boolean didRefresh) { - - } - }; - IndexShard shard = newStartedShard( - randomBoolean(), - Settings.EMPTY, - config -> new InternalEngine(configWithRefreshListener(config, refreshListener)) - ); - 
refreshLatch.set(new CountDownLatch(1)); // block refresh - final RefreshStats refreshStats = shard.refreshStats(); + }); final IndexingMemoryController controller = new IndexingMemoryController( Settings.builder() .put("indices.memory.interval", "200h") // disable it @@ -460,21 +431,24 @@ protected long getShardWritingBytes(IndexShard shard) { return 0L; } }; - int iterations = randomIntBetween(10, 100); ThreadPoolStats.Stats beforeStats = getRefreshThreadPoolStats(); + int iterations = randomIntBetween(1000, 2000); for (int i = 0; i < iterations; i++) { controller.forceCheck(); } assertBusy(() -> { ThreadPoolStats.Stats stats = getRefreshThreadPoolStats(); - assertThat(stats.completed(), equalTo(beforeStats.completed() + iterations - 1)); + assertThat(stats.active(), greaterThanOrEqualTo(1)); }); - refreshLatch.get().countDown(); // allow refresh + latch.countDown(); assertBusy(() -> { ThreadPoolStats.Stats stats = getRefreshThreadPoolStats(); - assertThat(stats.completed(), equalTo(beforeStats.completed() + iterations)); + assertThat(stats.queue(), equalTo(0)); }); - assertThat(shard.refreshStats().getTotal(), equalTo(refreshStats.getTotal() + 1)); + ThreadPoolStats.Stats afterStats = getRefreshThreadPoolStats(); + // The number of completed tasks should be in the order of the size of the refresh thread pool, way below the number of iterations, + // since we would not queue a shard to write its indexing buffer if it's already in the queue. + assertThat(afterStats.completed() - beforeStats.completed(), lessThan(100L)); closeShards(shard); } } diff --git a/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java b/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java index 305b25d377e06..76f266eb80ab9 100644 --- a/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java +++ b/server/src/test/java/org/elasticsearch/indices/IndicesQueryCacheTests.java @@ -281,7 +281,6 @@ public void testTwoShards() throws IOException { // with an empty cache gets closed. 
In that particular case, the eviction // callback is called with a number of evicted entries equal to 0 // see https://github.com/elastic/elasticsearch/issues/15043 - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/98776") public void testStatsOnEviction() throws IOException { Directory dir1 = newDirectory(); IndexWriter w1 = new IndexWriter(dir1, newIndexWriterConfig()); diff --git a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java index 592997f91932e..b0ad7d333d172 100644 --- a/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java +++ b/server/src/test/java/org/elasticsearch/indices/cluster/ClusterStateChanges.java @@ -70,6 +70,8 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterStateTaskExecutorUtils; import org.elasticsearch.cluster.service.MasterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; +import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; @@ -402,9 +404,11 @@ public ClusterState addNode(ClusterState clusterState, DiscoveryNode discoveryNo return runTasks( new NodeJoinExecutor(allocationService, (s, p, r) -> {}), clusterState, - List.of(JoinTask.singleNode(discoveryNode, transportVersion, DUMMY_REASON, ActionListener.running(() -> { - throw new AssertionError("should not complete publication"); - }), clusterState.term())) + List.of( + JoinTask.singleNode(discoveryNode, CompatibilityVersionsUtils.staticCurrent(), DUMMY_REASON, ActionListener.running(() -> { + throw new AssertionError("should not complete publication"); + }), clusterState.term()) + ) ); } @@ -415,9 +419,16 @@ public ClusterState joinNodesAndBecomeMaster(ClusterState clusterState, List new JoinTask.NodeJoinTask(node, transportVersion, DUMMY_REASON, ActionListener.running(() -> { - throw new AssertionError("should not complete publication"); - }))), + .map( + node -> new JoinTask.NodeJoinTask( + node, + new CompatibilityVersions(transportVersion), + DUMMY_REASON, + ActionListener.running(() -> { + throw new AssertionError("should not complete publication"); + }) + ) + ), clusterState.term() + between(1, 10) ) ) diff --git a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java index 6a863865d411b..a46cd75dfd493 100644 --- a/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/indices/recovery/RecoverySourceHandlerTests.java @@ -132,7 +132,7 @@ public class RecoverySourceHandlerTests extends MapperServiceTestCase { private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings( "index", - Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT).build() + Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).build() ); private static final BytesArray TRANSLOG_OPERATION_SOURCE = new BytesArray("{}".getBytes(StandardCharsets.UTF_8)); private final ShardId shardId = new ShardId(INDEX_SETTINGS.getIndex(), 1); diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java 
b/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java index 38c984c3de933..924ca1fc7a1e9 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestDocumentTests.java @@ -1158,4 +1158,16 @@ public void testIndexHistory() { assertFalse(ingestDocument.updateIndexHistory(index1)); assertThat(ingestDocument.getIndexHistory(), Matchers.contains(index1, index2)); } + + public void testEqualsAndHashCodeWithArray() { + // Test that equality still works when the ingest document uses primitive arrays, + // since normal .equals() methods would not work for Maps containing these arrays. + byte[] numbers = new byte[] { 0, 1, 2 }; + ingestDocument.setFieldValue("some.nested.array", numbers); + IngestDocument copy = new IngestDocument(ingestDocument); + byte[] copiedNumbers = copy.getFieldValue("some.nested.array", byte[].class); + assertArrayEquals(numbers, copiedNumbers); + assertNotEquals(numbers, copiedNumbers); + assertThat(copy, equalTo(ingestDocument)); + } } diff --git a/server/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java b/server/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java index 28320128b68b3..22f6035e2b325 100644 --- a/server/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java +++ b/server/src/test/java/org/elasticsearch/monitor/jvm/JvmGcMonitorServiceSettingsTests.java @@ -18,6 +18,7 @@ import java.util.AbstractMap; import java.util.HashSet; import java.util.Set; +import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; @@ -30,7 +31,7 @@ public class JvmGcMonitorServiceSettingsTests extends ESTestCase { public void testEmptySettingsAreOkay() throws InterruptedException { AtomicBoolean scheduled = new AtomicBoolean(); - execute(Settings.EMPTY, (command, interval, name) -> { + execute(Settings.EMPTY, (command, interval, executor) -> { scheduled.set(true); return new MockCancellable(); }, () -> assertTrue(scheduled.get())); @@ -39,7 +40,7 @@ public void testEmptySettingsAreOkay() throws InterruptedException { public void testDisabledSetting() throws InterruptedException { Settings settings = Settings.builder().put("monitor.jvm.gc.enabled", "false").build(); AtomicBoolean scheduled = new AtomicBoolean(); - execute(settings, (command, interval, name) -> { + execute(settings, (command, interval, executor) -> { scheduled.set(true); return new MockCancellable(); }, () -> assertFalse(scheduled.get())); @@ -166,14 +167,14 @@ public void testIllegalOverheadSettings() throws InterruptedException { }, true, null); } - private static void execute(Settings settings, TriFunction scheduler, Runnable asserts) + private static void execute(Settings settings, TriFunction scheduler, Runnable asserts) throws InterruptedException { execute(settings, scheduler, null, false, asserts); } private static void execute( Settings settings, - TriFunction scheduler, + TriFunction scheduler, Consumer consumer, boolean constructionShouldFail, Runnable asserts @@ -184,8 +185,8 @@ private static void execute( try { threadPool = new TestThreadPool(JvmGcMonitorServiceSettingsTests.class.getCanonicalName()) { @Override - public Cancellable scheduleWithFixedDelay(Runnable command, TimeValue interval, String name) { - return scheduler.apply(command, interval, name); + public Cancellable scheduleWithFixedDelay(Runnable command, 
TimeValue interval, Executor executor) { + return scheduler.apply(command, interval, executor); } }; try { diff --git a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java index 50b4750a72c85..580115a1e9af3 100644 --- a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java +++ b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java @@ -513,13 +513,13 @@ public TestTasksResponse( public TestTasksResponse(StreamInput in) throws IOException { super(in); - tasks = in.readList(TestTaskResponse::new); + tasks = in.readCollectionAsList(TestTaskResponse::new); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeList(tasks); + out.writeCollection(tasks); } public List getTasks() { diff --git a/server/src/test/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsActionTests.java b/server/src/test/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsActionTests.java index 8e98e5abb5600..08e8e46d4b95a 100644 --- a/server/src/test/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsActionTests.java +++ b/server/src/test/java/org/elasticsearch/reservedstate/action/ReservedClusterSettingsActionTests.java @@ -11,7 +11,9 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsUpdater; import org.elasticsearch.reservedstate.TransformState; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xcontent.XContentParser; @@ -19,11 +21,20 @@ import org.elasticsearch.xcontent.XContentType; import java.util.Collections; +import java.util.Set; +import static org.elasticsearch.common.settings.Setting.Property.Dynamic; +import static org.elasticsearch.common.settings.Setting.Property.NodeScope; import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.equalTo; public class ReservedClusterSettingsActionTests extends ESTestCase { + static final Setting dummySetting1 = Setting.simpleString("dummy.setting1", "default1", NodeScope, Dynamic); + static final Setting dummySetting2 = Setting.simpleString("dummy.setting2", "default2", NodeScope, Dynamic); + static final ClusterSettings clusterSettings = new ClusterSettings(Settings.EMPTY, Set.of(dummySetting1, dummySetting2)); + static final ReservedClusterSettingsAction testAction = new ReservedClusterSettingsAction(clusterSettings); + private TransformState processJSON(ReservedClusterSettingsAction action, TransformState prevState, String json) throws Exception { try (XContentParser parser = XContentType.JSON.xContent().createParser(XContentParserConfiguration.EMPTY, json)) { return action.transform(parser.map(), prevState); @@ -97,4 +108,40 @@ public void testSetUnsetSettings() throws Exception { assertEquals(0, updatedState.keys().size()); assertNull(updatedState.state().metadata().persistentSettings().get("indices.recovery.max_bytes_per_sec")); } + + public void testSettingNameNormalization() throws Exception { + Settings prevSettings = Settings.builder().put("dummy.setting1", "a-value").build(); + var clusterState = new SettingsUpdater(clusterSettings).updateSettings( + ClusterState.EMPTY_STATE, + Settings.EMPTY, + 
prevSettings, + logger + ); + TransformState prevState = new TransformState(clusterState, Set.of("dummy.setting1")); + + String json = """ + { + "dummy": { + "setting1": "value1", + "setting2": "value2" + } + } + """; + + TransformState newState = processJSON(testAction, prevState, json); + assertThat(newState.keys(), containsInAnyOrder("dummy.setting1", "dummy.setting2")); + assertThat(newState.state().metadata().persistentSettings().get("dummy.setting1"), equalTo("value1")); + assertThat(newState.state().metadata().persistentSettings().get("dummy.setting2"), equalTo("value2")); + + String jsonRemoval = """ + { + "dummy": { + "setting2": "value2" + } + } + """; + TransformState newState2 = processJSON(testAction, prevState, jsonRemoval); + assertThat(newState2.keys(), containsInAnyOrder("dummy.setting2")); + assertThat(newState2.state().metadata().persistentSettings().get("dummy.setting2"), equalTo("value2")); + } } diff --git a/server/src/test/java/org/elasticsearch/script/ScriptStatsTests.java b/server/src/test/java/org/elasticsearch/script/ScriptStatsTests.java index 83823bfb1e1df..f4af7aa381d5e 100644 --- a/server/src/test/java/org/elasticsearch/script/ScriptStatsTests.java +++ b/server/src/test/java/org/elasticsearch/script/ScriptStatsTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.script; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -139,7 +140,7 @@ public void testTimeSeriesIsEmpty() { public void testTimeSeriesSerialization() throws IOException { ScriptContextStats stats = randomStats(); - ScriptContextStats deserStats = serDeser(TransportVersion.V_8_0_0, TransportVersion.V_7_16_0, stats); + ScriptContextStats deserStats = serDeser(TransportVersions.V_8_0_0, TransportVersions.V_7_16_0, stats); // Due to how the versions are handled by TimeSeries serialization, we cannot just simply assert that both object are // equals but not the same assertEquals(stats.getCompilations(), deserStats.getCompilations()); @@ -150,11 +151,11 @@ public void testTimeSeriesSerialization() throws IOException { assertTrue(deserStats.getCacheEvictionsHistory().areTimingsEmpty()); assertEquals(stats.getCacheEvictions(), deserStats.getCacheEvictionsHistory().total); - deserStats = serDeser(TransportVersion.V_8_0_0, TransportVersion.V_8_0_0, stats); + deserStats = serDeser(TransportVersions.V_8_0_0, TransportVersions.V_8_0_0, stats); assertNotSame(stats, deserStats); assertEquals(stats, deserStats); - deserStats = serDeser(TransportVersion.V_8_1_0, TransportVersion.V_7_16_0, stats); + deserStats = serDeser(TransportVersions.V_8_1_0, TransportVersions.V_7_16_0, stats); // Due to how the versions are handled by TimeSeries serialization, we cannot just simply assert that both object are // equals but not the same assertEquals(stats.getCompilations(), deserStats.getCompilations()); @@ -163,7 +164,7 @@ public void testTimeSeriesSerialization() throws IOException { assertEquals(new TimeSeries(stats.getCompilationsHistory().total), deserStats.getCompilationsHistory()); assertEquals(new TimeSeries(stats.getCacheEvictionsHistory().total), deserStats.getCacheEvictionsHistory()); - deserStats = serDeser(TransportVersion.V_8_1_0, TransportVersion.V_8_1_0, stats); + deserStats = serDeser(TransportVersions.V_8_1_0, TransportVersions.V_8_1_0, stats); assertNotSame(stats, deserStats); assertEquals(stats, deserStats); } 
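A recurring mechanical change across these test files: the static constants (V_7_0_0, V_8_1_0, ZERO, MINIMUM_COMPATIBLE, and so on) have moved from TransportVersion onto the dedicated TransportVersions holder class, while TransportVersion.current() stays where it was. A before/after sketch of a typical call site, using only names that appear in the hunks above:

    import org.elasticsearch.TransportVersion;
    import org.elasticsearch.TransportVersions;

    // before: TransportVersion.MINIMUM_COMPATIBLE, TransportVersion.ZERO, TransportVersion.V_8_0_0
    TransportVersion min = TransportVersions.MINIMUM_COMPATIBLE;
    TransportVersion zero = TransportVersions.ZERO;
    TransportVersion current = TransportVersion.current(); // unchanged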
diff --git a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java index 46c186e656b6b..eb6318d8abe75 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchModuleTests.java @@ -10,7 +10,9 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.util.CharsRefBuilder; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.io.stream.GenericNamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -284,6 +286,7 @@ public List> getSuggesters() { 1, module.getNamedWriteables().stream().filter(e -> e.categoryClass.equals(Suggestion.class) && e.name.equals("test")).count() ); + assertEquals(1, module.getNamedWriteables().stream().filter(e -> e.categoryClass.equals(GenericNamedWriteable.class)).count()); } public void testRegisterHighlighter() { @@ -545,7 +548,7 @@ private static TestAggregationBuilder fromXContent(String name, XContentParser p @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } @@ -602,7 +605,7 @@ protected void validate(ValidationContext context) {} @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } @@ -652,7 +655,7 @@ public RescoreContext innerBuildContext(int windowSize, SearchExecutionContext c @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } @@ -720,7 +723,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java index 246f70fdb581e..868832f61ef9c 100644 --- a/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java +++ b/server/src/test/java/org/elasticsearch/search/SearchServiceTests.java @@ -19,6 +19,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchTimeoutException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsResponse; @@ -1952,7 +1953,7 @@ public void testEnableSearchWorkerThreads() throws IOException { } } - public void testEnableQueryPhaseParallelCollection() throws IOException { + public void testDetermineMaximumNumberOfSlices() { IndexService indexService = createIndex("index", Settings.EMPTY); IndexShard indexShard = indexService.getShard(0); ShardSearchRequest request = new ShardSearchRequest( @@ -1981,20 +1982,20 @@ public void testEnableQueryPhaseParallelCollection() throws IOException { { assertEquals(executorPoolSize, service.determineMaximumNumberOfSlices(threadPoolExecutor, request, ResultsType.DFS)); assertEquals(1, service.determineMaximumNumberOfSlices(null, request, ResultsType.DFS)); - assertEquals(1, 
service.determineMaximumNumberOfSlices(threadPoolExecutor, request, ResultsType.QUERY)); + assertEquals(executorPoolSize, service.determineMaximumNumberOfSlices(threadPoolExecutor, request, ResultsType.QUERY)); assertEquals(1, service.determineMaximumNumberOfSlices(notThreadPoolExecutor, request, ResultsType.DFS)); } try { ClusterUpdateSettingsResponse response = client().admin() .cluster() .prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey(), true).build()) + .setPersistentSettings(Settings.builder().put(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey(), false).build()) .get(); assertTrue(response.isAcknowledged()); { assertEquals(executorPoolSize, service.determineMaximumNumberOfSlices(threadPoolExecutor, request, ResultsType.DFS)); assertEquals(1, service.determineMaximumNumberOfSlices(null, request, ResultsType.DFS)); - assertEquals(executorPoolSize, service.determineMaximumNumberOfSlices(threadPoolExecutor, request, ResultsType.QUERY)); + assertEquals(1, service.determineMaximumNumberOfSlices(threadPoolExecutor, request, ResultsType.QUERY)); assertEquals(1, service.determineMaximumNumberOfSlices(null, request, ResultsType.QUERY)); assertEquals(1, service.determineMaximumNumberOfSlices(notThreadPoolExecutor, request, ResultsType.DFS)); } @@ -2007,7 +2008,7 @@ public void testEnableQueryPhaseParallelCollection() throws IOException { .get(); { assertEquals(executorPoolSize, service.determineMaximumNumberOfSlices(threadPoolExecutor, request, ResultsType.DFS)); - assertEquals(1, service.determineMaximumNumberOfSlices(threadPoolExecutor, request, ResultsType.QUERY)); + assertEquals(executorPoolSize, service.determineMaximumNumberOfSlices(threadPoolExecutor, request, ResultsType.QUERY)); } } } @@ -2075,12 +2076,23 @@ public void testSlicingBehaviourForParallelCollection() throws Exception { SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true); ContextIndexSearcher searcher = searchContext.searcher(); assertNotNull(searcher.getExecutor()); + + final int maxPoolSize = executor.getMaximumPoolSize(); + assertEquals( + "Sanity check to ensure this isn't the default of 1 when pool size is unset", + configuredMaxPoolSize, + maxPoolSize + ); + + final int expectedSlices = ContextIndexSearcher.computeSlices(searcher.getIndexReader().leaves(), maxPoolSize, 1).length; + assertNotEquals("Sanity check to ensure this isn't the default of 1 when pool size is unset", 1, expectedSlices); + final long priorExecutorTaskCount = executor.getCompletedTaskCount(); searcher.search(termQuery, new TotalHitCountCollectorManager()); assertBusy( () -> assertEquals( - "The number of slices should be 1 as QUERY parallel collection is disabled by default.", - 1, + "QUERY supports parallel collection when enabled, so the number of slices should be > 1.", + expectedSlices, executor.getCompletedTaskCount() - priorExecutorTaskCount ) ); @@ -2118,40 +2130,25 @@ public void testSlicingBehaviourForParallelCollection() throws Exception { ClusterUpdateSettingsResponse response = client().admin() .cluster() .prepareUpdateSettings() - .setPersistentSettings(Settings.builder().put(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey(), true).build()) + .setPersistentSettings(Settings.builder().put(QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey(), false).build()) .get(); assertTrue(response.isAcknowledged()); { SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true); 
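// Context for the flipped expectations in this test: QUERY-phase parallel collection is now
// on by default, so the expected slice count is derived from the reader's leaves and the
// executor size instead of being fixed at 1, e.g. (as the surrounding assertions do):
//
//     int expectedSlices = ContextIndexSearcher.computeSlices(
//         searcher.getIndexReader().leaves(), executor.getMaximumPoolSize(), 1).length;
//
// and persisting QUERY_PHASE_PARALLEL_COLLECTION_ENABLED=false collapses collection back to
// a single slice, which is what the middle block below asserts.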
ContextIndexSearcher searcher = searchContext.searcher(); assertNotNull(searcher.getExecutor()); - - final int maxPoolSize = executor.getMaximumPoolSize(); - assertEquals( - "Sanity check to ensure this isn't the default of 1 when pool size is unset", - configuredMaxPoolSize, - maxPoolSize - ); - - final int expectedSlices = ContextIndexSearcher.computeSlices( - searcher.getIndexReader().leaves(), - maxPoolSize, - 1 - ).length; - assertNotEquals("Sanity check to ensure this isn't the default of 1 when pool size is unset", 1, expectedSlices); - final long priorExecutorTaskCount = executor.getCompletedTaskCount(); searcher.search(termQuery, new TotalHitCountCollectorManager()); assertBusy( () -> assertEquals( - "QUERY supports parallel collection when enabled, so the number of slices should be > 1.", - expectedSlices, + "The number of slices should be 1 when QUERY parallel collection is disabled.", + 1, executor.getCompletedTaskCount() - priorExecutorTaskCount ) ); } } finally { - // Reset to the original default setting. + // Reset to the original default setting and check to ensure it takes effect. client().admin() .cluster() .prepareUpdateSettings() @@ -2161,12 +2158,27 @@ public void testSlicingBehaviourForParallelCollection() throws Exception { SearchContext searchContext = service.createContext(readerContext, request, task, ResultsType.QUERY, true); ContextIndexSearcher searcher = searchContext.searcher(); assertNotNull(searcher.getExecutor()); + + final int maxPoolSize = executor.getMaximumPoolSize(); + assertEquals( + "Sanity check to ensure this isn't the default of 1 when pool size is unset", + configuredMaxPoolSize, + maxPoolSize + ); + + final int expectedSlices = ContextIndexSearcher.computeSlices( + searcher.getIndexReader().leaves(), + maxPoolSize, + 1 + ).length; + assertNotEquals("Sanity check to ensure this isn't the default of 1 when pool size is unset", 1, expectedSlices); + final long priorExecutorTaskCount = executor.getCompletedTaskCount(); searcher.search(termQuery, new TotalHitCountCollectorManager()); assertBusy( () -> assertEquals( - "The number of slices should be 1 for Query when QUERY_PHASE_PARALLEL_COLLECTION_ENABLED is disabled.", - 1, + "QUERY supports parallel collection when enabled, so the number of slices should be > 1.", + expectedSlices, executor.getCompletedTaskCount() - priorExecutorTaskCount ) ); @@ -2276,7 +2288,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } @Override diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java index d679a746d3e16..7d3985b9fc5f5 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregationCollectorTests.java @@ -64,7 +64,7 @@ private void assertNeedsScores(AggregationBuilder builder, boolean expected) thr builder, new MatchAllDocsQuery(), iw -> {}, - (indexSearcher, agg) -> assertEquals(expected, agg.scoreMode().needsScores()) + (indexReader, agg) -> assertEquals(expected, agg.scoreMode().needsScores()) ); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java index 665f66fcca316..bf0b087f67a5a 100644 --- 
a/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/AggregatorFactoriesTests.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -397,7 +398,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } @Override diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java index 3797fb8f79d03..7244e711544ac 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoHashGridTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.geo.GeoBoundingBox; import org.elasticsearch.common.geo.GeoBoundingBoxTests; import org.elasticsearch.common.geo.GeoPoint; @@ -51,12 +52,12 @@ protected GeoHashGridAggregationBuilder createTestAggregatorBuilder() { public void testSerializationPreBounds() throws Exception { TransportVersion noBoundsSupportVersion = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_7_0_0, - TransportVersion.V_7_5_0 + TransportVersions.V_7_0_0, + TransportVersions.V_7_5_0 ); GeoHashGridAggregationBuilder builder = createTestAggregatorBuilder(); try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setTransportVersion(TransportVersion.V_7_6_0); + output.setTransportVersion(TransportVersions.V_7_6_0); builder.writeTo(output); try ( StreamInput in = new NamedWriteableAwareStreamInput( diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoTileGridTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoTileGridTests.java index 334ddca4a2d63..5e4e4870b1ef7 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoTileGridTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/GeoTileGridTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.aggregations.bucket; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.geo.GeoBoundingBox; import org.elasticsearch.common.geo.GeoBoundingBoxTests; import org.elasticsearch.common.geo.GeoPoint; @@ -51,12 +52,12 @@ protected GeoTileGridAggregationBuilder createTestAggregatorBuilder() { public void testSerializationPreBounds() throws Exception { TransportVersion noBoundsSupportVersion = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_7_0_0, - TransportVersion.V_7_5_0 + TransportVersions.V_7_0_0, + TransportVersions.V_7_5_0 ); GeoTileGridAggregationBuilder builder = createTestAggregatorBuilder(); try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setTransportVersion(TransportVersion.V_7_6_0); + output.setTransportVersion(TransportVersions.V_7_6_0); builder.writeTo(output); try ( StreamInput in = new NamedWriteableAwareStreamInput( diff --git 
a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java index 63b69cc86c8da..f9f6fbf522749 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/filter/FiltersAggregatorTests.java @@ -20,7 +20,6 @@ import org.apache.lucene.search.BooleanClause.Occur; import org.apache.lucene.search.BooleanQuery; import org.apache.lucene.search.IndexOrDocValuesQuery; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.MatchNoDocsQuery; import org.apache.lucene.search.Query; @@ -134,7 +133,7 @@ public void testBuildEmpty() throws IOException { if (askForOtherBucket) { builder.otherBucket(true).otherBucketKey("other"); } - withAggregator(builder, new MatchAllDocsQuery(), iw -> {}, (searcher, aggregator) -> { + withAggregator(builder, new MatchAllDocsQuery(), iw -> {}, (reader, aggregator) -> { InternalFilters result = (InternalFilters) aggregator.buildEmptyAggregation(); for (int i = 0; i < filters.length; i++) { assertThat(result.getBucketByKey(String.valueOf(i)).getDocCount(), equalTo(0L)); @@ -308,7 +307,7 @@ public void testMergingQueries() throws IOException { iw.addDocument(List.of(new LongPoint("test", time), new SortedNumericDocValuesField("test", time))); time = DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2020-02-01"); iw.addDocument(List.of(new LongPoint("test", time), new SortedNumericDocValuesField("test", time))); - }, (searcher, aggregator) -> { + }, (reader, aggregator) -> { /* * The topLevelQuery is entirely contained within the filter query so * it is good enough to match that. 
See MergedPointRangeQueryTests for @@ -661,9 +660,7 @@ public void onCache(ShardId shardId, Accountable accountable) {} bitsetFilterCache, LongPoint.newRangeQuery("t", 5, Long.MAX_VALUE) ); - IndexSearcher searcher = newSearcher(limitedReader); - int segmentsWithLiveDocs = (int) searcher.getIndexReader() - .leaves() + int segmentsWithLiveDocs = (int) limitedReader.leaves() .stream() .map(LeafReaderContext::reader) .filter(leafReader -> leafReader.getLiveDocs() != null) @@ -687,7 +684,7 @@ public void onCache(ShardId shardId, Accountable accountable) {} "filters", matchesList().item( matchesMap().entry("query", "*:*") - .entry("segments_counted_in_constant_time", searcher.getLeafContexts().size()) + .entry("segments_counted_in_constant_time", limitedReader.leaves().size()) ) ) ) @@ -735,9 +732,7 @@ public void onCache(ShardId shardId, Accountable accountable) {} bitsetFilterCache, LongPoint.newRangeQuery("t", 5, Long.MAX_VALUE) ); - IndexSearcher searcher = newSearcher(limitedReader); - int segmentsWithLiveDocs = (int) searcher.getIndexReader() - .leaves() + int segmentsWithLiveDocs = (int) limitedReader.leaves() .stream() .map(LeafReaderContext::reader) .filter(leafReader -> leafReader.getLiveDocs() != null) @@ -761,7 +756,7 @@ public void onCache(ShardId shardId, Accountable accountable) {} "filters", matchesList().item( matchesMap().entry("query", "foo:bar") - .entry("segments_counted_in_constant_time", lessThan(searcher.getLeafContexts().size())) + .entry("segments_counted_in_constant_time", lessThan(limitedReader.leaves().size())) ) ) ) @@ -806,9 +801,7 @@ public void onCache(ShardId shardId, Accountable accountable) {} bitsetFilterCache, LongPoint.newRangeQuery("t", Long.MIN_VALUE, Long.MAX_VALUE) ); - IndexSearcher searcher = newSearcher(limitedReader); - int segmentsWithLiveDocs = (int) searcher.getIndexReader() - .leaves() + int segmentsWithLiveDocs = (int) limitedReader.leaves() .stream() .map(LeafReaderContext::reader) .filter(leafReader -> leafReader.getLiveDocs() != null) @@ -833,7 +826,7 @@ public void onCache(ShardId shardId, Accountable accountable) {} "filters", matchesList().item( matchesMap().entry("query", "foo:bar") - .entry("segments_counted_in_constant_time", searcher.getLeafContexts().size()) + .entry("segments_counted_in_constant_time", limitedReader.leaves().size()) ) ) ) @@ -1599,10 +1592,10 @@ private void docValuesFieldExistsNoDataTestCase(MappedFieldType fieldType) throw }; // Exists queries convert to MatchNone if this isn't defined FieldNamesFieldMapper.FieldNamesFieldType fnft = FieldNamesFieldMapper.FieldNamesFieldType.get(true); - withAggregator(builder, new MatchAllDocsQuery(), buildIndex, (searcher, aggregator) -> { + withAggregator(builder, new MatchAllDocsQuery(), buildIndex, (reader, aggregator) -> { assertThat(aggregator, instanceOf(FilterByFilterAggregator.class)); - Map debug = collectAndGetFilterDebugInfo(searcher, aggregator); + Map debug = collectAndGetFilterDebugInfo(reader, aggregator); assertMap(debug, matchesMap().extraOk().entry("segments_counted_in_constant_time", greaterThan(0))); }, fieldType, fnft); testCase(buildIndex, (InternalFilters result) -> { @@ -1616,9 +1609,9 @@ protected List objectMappers() { return MOCK_OBJECT_MAPPERS; } - private Map collectAndGetFilterDebugInfo(IndexSearcher searcher, Aggregator aggregator) throws IOException { + private Map collectAndGetFilterDebugInfo(IndexReader reader, Aggregator aggregator) throws IOException { aggregator.preCollection(); - for (LeafReaderContext ctx : 
searcher.getIndexReader().leaves()) { + for (LeafReaderContext ctx : reader.leaves()) { LeafBucketCollector leafCollector = aggregator.getLeafCollector(new AggregationExecutionContext(ctx, null, null, null)); assertTrue(leafCollector.isNoop()); } diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java index 0c08324994bc7..972ec1f47a904 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/histogram/DateHistogramAggregatorTests.java @@ -1118,7 +1118,7 @@ public void testBuildEmpty() throws IOException { new DateHistogramAggregationBuilder("test").field(AGGREGABLE_DATE).calendarInterval(DateHistogramInterval.YEAR).offset(10), new MatchAllDocsQuery(), iw -> {}, - (searcher, aggregator) -> { + (reader, aggregator) -> { InternalDateHistogram histo = (InternalDateHistogram) aggregator.buildEmptyAggregation(); /* * There was a time where we included the offset in the diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java index bc607b25b7c5c..12d323fb15ca0 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/bucket/terms/TermsAggregatorTests.java @@ -1891,7 +1891,7 @@ public void testAsSubAgg() throws IOException { terms1 = dh.getBuckets().get(1).getAggregations().get("k"); assertThat(terms1.getBuckets().stream().map(StringTerms.Bucket::getKey).collect(toList()), equalTo(List.of("a"))); }, new AggTestConfig(builder, dft, kft)); - withAggregator(builder, new MatchAllDocsQuery(), buildIndex, (searcher, aggregator) -> { + withAggregator(builder, new MatchAllDocsQuery(), buildIndex, (reader, aggregator) -> { TermsAggregator terms = (TermsAggregator) aggregator.subAggregator("k"); Map<String, Object> info = new HashMap<>(); terms.collectDebugInfo(info::put); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetricTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetricTests.java index 34f0d40985358..4c4ac418c949b 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetricTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/InternalScriptedMetricTests.java @@ -8,7 +8,7 @@ package org.elasticsearch.search.aggregations.metrics; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.geo.GeoPoint; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; @@ -283,8 +283,8 @@ public void testOldSerialization() throws IOException { InternalAggregation.class, TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_7_0_0, - TransportVersionUtils.getPreviousVersion(TransportVersion.V_7_8_0) + TransportVersions.V_7_0_0, + TransportVersionUtils.getPreviousVersion(TransportVersions.V_7_8_0) ) ); assertThat(roundTripped, equalTo(original)); @@ -299,8 +299,8 @@ public void testOldSerialization() throws IOException { InternalAggregation.class,
TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_7_0_0, - TransportVersionUtils.getPreviousVersion(TransportVersion.V_7_8_0) + TransportVersions.V_7_0_0, + TransportVersionUtils.getPreviousVersion(TransportVersions.V_7_8_0) ) ) ); diff --git a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestStateTests.java b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestStateTests.java index a70e7a241fe7a..f242e19012a35 100644 --- a/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestStateTests.java +++ b/server/src/test/java/org/elasticsearch/search/aggregations/metrics/TDigestStateTests.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -193,10 +194,10 @@ public void testSerialization() throws IOException { backwardsCompatible.add(i); } - TDigestState serialized = writeToAndReadFrom(state, TransportVersion.V_8_500_014); + TDigestState serialized = writeToAndReadFrom(state, TransportVersions.V_8_500_020); assertEquals(serialized, state); - TDigestState serializedBackwardsCompatible = writeToAndReadFrom(state, TransportVersion.V_8_500_010); + TDigestState serializedBackwardsCompatible = writeToAndReadFrom(state, TransportVersions.V_8_8_1); assertNotEquals(serializedBackwardsCompatible, state); assertEquals(serializedBackwardsCompatible, backwardsCompatible); } diff --git a/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java b/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java index 0c78ecf883220..83a3497e44259 100644 --- a/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java +++ b/server/src/test/java/org/elasticsearch/search/builder/SearchSourceBuilderTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.builder; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.admin.cluster.stats.SearchUsageStats; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; @@ -30,6 +31,7 @@ import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder; import org.elasticsearch.index.query.functionscore.LinearDecayFunctionBuilder; import org.elasticsearch.script.Script; +import org.elasticsearch.script.ScriptType; import org.elasticsearch.search.AbstractSearchTestCase; import org.elasticsearch.search.SearchExtBuilder; import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; @@ -41,6 +43,9 @@ import org.elasticsearch.search.slice.SliceBuilder; import org.elasticsearch.search.sort.FieldSortBuilder; import org.elasticsearch.search.sort.ScoreSortBuilder; +import org.elasticsearch.search.sort.ScriptSortBuilder; +import org.elasticsearch.search.sort.SortBuilder; +import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.search.suggest.SuggestBuilder; import org.elasticsearch.search.suggest.term.TermSuggestionBuilder; @@ -63,7 +68,9 @@ import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; +import static java.util.Collections.emptyMap; import static 
org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.Matchers.equalTo; @@ -122,8 +129,8 @@ public void testSerializingWithRuntimeFieldsBeforeSupportedThrows() { SearchSourceBuilder original = new SearchSourceBuilder().runtimeMappings(randomRuntimeMappings()); TransportVersion v = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_7_0_0, - TransportVersionUtils.getPreviousVersion(TransportVersion.V_7_11_0) + TransportVersions.V_7_0_0, + TransportVersionUtils.getPreviousVersion(TransportVersions.V_7_11_0) ); Exception e = expectThrows(IllegalArgumentException.class, () -> copyBuilder(original, v)); assertThat(e.getMessage(), equalTo("Versions before 7110099 don't support [runtime_mappings] and search was sent to [" + v + "]")); @@ -886,25 +893,61 @@ public void testSearchTotalUsageCollection() throws IOException { } public void testSupportsParallelCollection() { - SearchSourceBuilder searchSourceBuilder = createSearchSourceBuilder(); - searchSourceBuilder.collapse(null); - if (searchSourceBuilder.aggregations() == null) { - assertTrue(searchSourceBuilder.supportsParallelCollection()); - } else { - assertEquals(searchSourceBuilder.aggregations().supportsParallelCollection(), searchSourceBuilder.supportsParallelCollection()); + Supplier<SearchSourceBuilder> newSearchSourceBuilder = () -> { + SearchSourceBuilder searchSourceBuilder = createSearchSourceBuilder(); + searchSourceBuilder.collapse(null); + searchSourceBuilder.sort((List<SortBuilder<?>>) null); + searchSourceBuilder.profile(false); + return searchSourceBuilder; + }; + { + SearchSourceBuilder searchSourceBuilder = newSearchSourceBuilder.get(); + if (searchSourceBuilder.aggregations() == null) { + assertTrue(searchSourceBuilder.supportsParallelCollection()); + } else { + assertEquals( + searchSourceBuilder.aggregations().supportsParallelCollection(), + searchSourceBuilder.supportsParallelCollection() + ); + } } + { + SearchSourceBuilder searchSourceBuilder = newSearchSourceBuilder.get(); + searchSourceBuilder.aggregation(new MaxAggregationBuilder("max")); + assertTrue(searchSourceBuilder.supportsParallelCollection()); - searchSourceBuilder.aggregation(new MaxAggregationBuilder("max")); - assertTrue(searchSourceBuilder.supportsParallelCollection()); - - searchSourceBuilder.aggregation(new TermsAggregationBuilder("terms")); - assertFalse(searchSourceBuilder.supportsParallelCollection()); - - searchSourceBuilder.collapse(CollapseBuilderTests.randomCollapseBuilder()); - assertFalse(searchSourceBuilder.supportsParallelCollection()); - - SearchSourceBuilder collapse = new SearchSourceBuilder().collapse(CollapseBuilderTests.randomCollapseBuilder()); - assertFalse(collapse.supportsParallelCollection()); + } + { + SearchSourceBuilder searchSourceBuilder = newSearchSourceBuilder.get(); + searchSourceBuilder.aggregation(new TermsAggregationBuilder("terms")); + assertFalse(searchSourceBuilder.supportsParallelCollection()); + } + { + SearchSourceBuilder searchSourceBuilder = newSearchSourceBuilder.get(); + searchSourceBuilder.collapse(CollapseBuilderTests.randomCollapseBuilder()); + assertFalse(searchSourceBuilder.supportsParallelCollection()); + } + { + SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder().collapse(CollapseBuilderTests.randomCollapseBuilder()); + assertFalse(searchSourceBuilder.supportsParallelCollection()); + } + { + SearchSourceBuilder searchSourceBuilder = newSearchSourceBuilder.get(); +
searchSourceBuilder.sort(SortBuilders.scoreSort().order(randomFrom(SortOrder.values()))); + assertTrue(searchSourceBuilder.supportsParallelCollection()); + searchSourceBuilder.sort( + SortBuilders.scriptSort( + new Script(ScriptType.INLINE, Script.DEFAULT_SCRIPT_LANG, "foo", emptyMap()), + ScriptSortBuilder.ScriptSortType.NUMBER + ).order(randomFrom(SortOrder.values())) + ); + assertFalse(searchSourceBuilder.supportsParallelCollection()); + } + { + SearchSourceBuilder searchSourceBuilder = newSearchSourceBuilder.get(); + searchSourceBuilder.profile(true); + assertFalse(searchSourceBuilder.supportsParallelCollection()); + } } private void assertIndicesBoostParseErrorMessage(String restContent, String expectedErrorMessage) throws IOException { diff --git a/server/src/test/java/org/elasticsearch/search/geo/GeoBoundsGenericWriteableTests.java b/server/src/test/java/org/elasticsearch/search/geo/GeoBoundsGenericWriteableTests.java new file mode 100644 index 0000000000000..500903c437d91 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/search/geo/GeoBoundsGenericWriteableTests.java @@ -0,0 +1,91 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.search.geo; + +import org.apache.lucene.geo.Rectangle; +import org.apache.lucene.tests.geo.GeoTestUtil; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.geo.GeoBoundingBox; +import org.elasticsearch.common.geo.GeoPoint; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.GenericNamedWriteable; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.search.geo.GeoBoundsGenericWriteableTests.GenericWriteableWrapper; +import org.elasticsearch.test.AbstractWireTestCase; +import java.io.IOException; +import java.util.List; +import static org.hamcrest.Matchers.containsString; +public class GeoBoundsGenericWriteableTests extends AbstractWireTestCase<GenericWriteableWrapper> { + /** + * Wrapper around a GeoBoundingBox to verify that it round-trips via {@code writeGenericValue} and {@code readGenericValue} + */ + public record GenericWriteableWrapper(GeoBoundingBox geoBoundingBox) implements Writeable { + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeGenericValue(geoBoundingBox); + } + public static GenericWriteableWrapper readFrom(StreamInput in) throws IOException { + return new GenericWriteableWrapper((GeoBoundingBox) in.readGenericValue()); + } + } + private static final NamedWriteableRegistry NAMED_WRITEABLE_REGISTRY = new NamedWriteableRegistry( + List.of(new NamedWriteableRegistry.Entry(GenericNamedWriteable.class, GeoBoundingBox.class.getSimpleName(), GeoBoundingBox::new)) + ); + @Override + protected NamedWriteableRegistry writableRegistry() { + return NAMED_WRITEABLE_REGISTRY; + } + @Override + protected GenericWriteableWrapper createTestInstance() { + Rectangle box = GeoTestUtil.nextBox(); + return new GenericWriteableWrapper(new GeoBoundingBox(new
GeoPoint(box.maxLat, box.minLon), new GeoPoint(box.minLat, box.maxLon))); + } + + @Override + protected GenericWriteableWrapper mutateInstance(GenericWriteableWrapper instance) throws IOException { + GeoBoundingBox geoBoundingBox = instance.geoBoundingBox; + double width = geoBoundingBox.right() - geoBoundingBox.left(); + double height = geoBoundingBox.top() - geoBoundingBox.bottom(); + double top = geoBoundingBox.top() - height / 4; + double left = geoBoundingBox.left() + width / 4; + double bottom = geoBoundingBox.bottom() + height / 4; + double right = geoBoundingBox.right() - width / 4; + return new GenericWriteableWrapper(new GeoBoundingBox(new GeoPoint(top, left), new GeoPoint(bottom, right))); + } + + @Override + protected GenericWriteableWrapper copyInstance(GenericWriteableWrapper instance, TransportVersion version) throws IOException { + return copyInstance(instance, writableRegistry(), StreamOutput::writeWriteable, GenericWriteableWrapper::readFrom, version); + } + + public void testSerializationFailsWithOlderVersion() { + TransportVersion older = TransportVersions.V_8_500_069; + assert older.before(TransportVersions.V_8_500_070); + final var testInstance = createTestInstance().geoBoundingBox(); + try (var output = new BytesStreamOutput()) { + output.setTransportVersion(older); + assertThat( + expectThrows(Throwable.class, () -> output.writeGenericValue(testInstance)).getMessage(), + containsString("[GeoBoundingBox] requires minimal transport version") + ); + } + } +} diff --git a/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java b/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java index 676d62ce73b49..01a60cc5060e9 100644 --- a/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java +++ b/server/src/test/java/org/elasticsearch/search/internal/ContextIndexSearcherTests.java @@ -137,7 +137,7 @@ public void testIntersectScorerAndRoleBits() throws Exception { 1f ); - LeafReaderContext leaf = directoryReader.leaves().get(0); + LeafReaderContext leaf = searcher.getIndexReader().leaves().get(0); CombinedBitSet bitSet = new CombinedBitSet(query(leaf, "field1", "value1"), leaf.reader().getLiveDocs()); LeafCollector leafCollector = new LeafBucketCollector() { diff --git a/server/src/test/java/org/elasticsearch/search/internal/ShardSearchRequestTests.java b/server/src/test/java/org/elasticsearch/search/internal/ShardSearchRequestTests.java index 65b7eb1c844c4..ed92bdb1e5919 100644 --- a/server/src/test/java/org/elasticsearch/search/internal/ShardSearchRequestTests.java +++ b/server/src/test/java/org/elasticsearch/search/internal/ShardSearchRequestTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.internal; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.OriginalIndices; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.cluster.metadata.AliasMetadata; @@ -227,16 +228,16 @@ public void testChannelVersion() throws Exception { for (int i = 0; i < iterations; i++) { TransportVersion version = TransportVersionUtils.randomCompatibleVersion(random()); if (request.isForceSyntheticSource()) { - version = TransportVersionUtils.randomVersionBetween(random(), TransportVersion.V_8_4_0, TransportVersion.current()); + version = TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_8_4_0, TransportVersion.current()); } if 
(Optional.ofNullable(request.source()).map(SearchSourceBuilder::knnSearch).map(List::size).orElse(0) > 1) { - version = TransportVersionUtils.randomVersionBetween(random(), TransportVersion.V_8_7_0, TransportVersion.current()); + version = TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_8_7_0, TransportVersion.current()); } if (request.source() != null && request.source().rankBuilder() != null) { - version = TransportVersionUtils.randomVersionBetween(random(), TransportVersion.V_8_8_0, TransportVersion.current()); + version = TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_8_8_0, TransportVersion.current()); } if (request.source() != null && request.source().subSearches().size() >= 2) { - version = TransportVersionUtils.randomVersionBetween(random(), TransportVersion.V_8_500_013, TransportVersion.current()); + version = TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_8_500_020, TransportVersion.current()); } request = copyWriteable(request, namedWriteableRegistry, ShardSearchRequest::new, version); channelVersion = TransportVersion.min(channelVersion, version); @@ -269,7 +270,7 @@ public void testForceSyntheticUnsupported() throws IOException { request.setForceSyntheticSource(true); ShardSearchRequest shardRequest = createShardSearchReqest(request); StreamOutput out = new BytesStreamOutput(); - out.setTransportVersion(TransportVersion.V_8_3_0); + out.setTransportVersion(TransportVersions.V_8_3_0); Exception e = expectThrows(IllegalArgumentException.class, () -> shardRequest.writeTo(out)); assertEquals(e.getMessage(), "force_synthetic_source is not supported before 8.4.0"); } diff --git a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/SlowRunningQueryBuilder.java b/server/src/test/java/org/elasticsearch/search/query/SlowRunningQueryBuilder.java similarity index 75% rename from x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/SlowRunningQueryBuilder.java rename to server/src/test/java/org/elasticsearch/search/query/SlowRunningQueryBuilder.java index c57f853dfd0c3..fdee3da477bb7 100644 --- a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/SlowRunningQueryBuilder.java +++ b/server/src/test/java/org/elasticsearch/search/query/SlowRunningQueryBuilder.java @@ -1,11 +1,12 @@ /* * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. */ -package org.elasticsearch.xpack.search; +package org.elasticsearch.search.query; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; @@ -13,6 +14,7 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.search.Queries; @@ -28,20 +30,38 @@ * * This QueryBuilder is useful in tests that need a slow running query, such as when * you are trying to have a query timeout. 
+ * + * The sleep can be specified to happen on all indices or only on a specified index. + * After sleeping (if at all), it performs a MatchAll query. */ public class SlowRunningQueryBuilder extends AbstractQueryBuilder<SlowRunningQueryBuilder> { public static final String NAME = "slow"; private long sleepTime; + private String index; + /** + * Sleep for sleepTime millis on all indices + * @param sleepTime the time to sleep, in milliseconds + */ public SlowRunningQueryBuilder(long sleepTime) { this.sleepTime = sleepTime; } + /** + * Sleep for sleepTime millis but only on the specified index + * @param sleepTime the time to sleep, in milliseconds + * @param index the name of the only index on which to sleep + */ + public SlowRunningQueryBuilder(long sleepTime, String index) { + this.sleepTime = sleepTime; + this.index = index; + } + public SlowRunningQueryBuilder(StreamInput in) throws IOException { super(in); this.sleepTime = in.readLong(); + this.index = in.readOptionalString(); } @Override @@ -51,12 +71,13 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } @Override protected void doWriteTo(StreamOutput out) throws IOException { out.writeLong(sleepTime); + out.writeOptionalString(index); } @Override @@ -78,7 +99,9 @@ protected Query doToQuery(SearchExecutionContext context) throws IOException { return new Query() { @Override public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float boost) throws IOException { - sleep(); + if (index == null || context.index().getName().equals(index)) { + sleep(); + } return delegate.createWeight(searcher, scoreMode, boost); } diff --git a/server/src/test/java/org/elasticsearch/search/query/ThrowingQueryBuilder.java b/server/src/test/java/org/elasticsearch/search/query/ThrowingQueryBuilder.java index 38b7cb91bec87..cd3d195030c55 100644 --- a/server/src/test/java/org/elasticsearch/search/query/ThrowingQueryBuilder.java +++ b/server/src/test/java/org/elasticsearch/search/query/ThrowingQueryBuilder.java @@ -13,6 +13,7 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.search.Queries; @@ -66,7 +67,7 @@ public ThrowingQueryBuilder(StreamInput in) throws IOException { this.randomUID = in.readLong(); this.failure = in.readException(); this.shardId = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_040)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { this.index = in.readOptionalString(); } else { this.index = null; @@ -78,7 +79,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeLong(randomUID); out.writeException(failure); out.writeVInt(shardId); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_040)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_040)) { out.writeOptionalString(index); } } @@ -140,6 +141,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/server/src/test/java/org/elasticsearch/search/sort/SortValueTests.java b/server/src/test/java/org/elasticsearch/search/sort/SortValueTests.java index cb23859620cb4..3539cfdc9709c 100644 --- a/server/src/test/java/org/elasticsearch/search/sort/SortValueTests.java +++
b/server/src/test/java/org/elasticsearch/search/sort/SortValueTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.document.InetAddressPoint; import org.apache.lucene.util.BytesRef; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.InetAddresses; @@ -225,8 +226,8 @@ public void testSerializeBytesToOldVersion() { SortValue value = SortValue.from(new BytesRef("can't send me!")); TransportVersion version = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_7_0_0, - TransportVersion.V_7_10_1 + TransportVersions.V_7_0_0, + TransportVersions.V_7_10_1 ); Exception e = expectThrows(IllegalArgumentException.class, () -> copyInstance(value, version)); assertThat( diff --git a/server/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java b/server/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java index 784bbdb2bc8e5..b6fa1e236a843 100644 --- a/server/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java +++ b/server/src/test/java/org/elasticsearch/search/suggest/SuggestTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.suggest; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -242,7 +243,7 @@ public void testMergingSuggestionOptions() { public void testSerialization() throws IOException { TransportVersion bwcVersion = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.MINIMUM_COMPATIBLE, + TransportVersions.MINIMUM_COMPATIBLE, TransportVersion.current() ); diff --git a/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java b/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java index 47360c23013ab..f52f8ecc1aff8 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/AbstractKnnVectorQueryBuilderTestCase.java @@ -14,6 +14,7 @@ import org.apache.lucene.search.KnnFloatVectorQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -210,8 +211,8 @@ public void testBWCVersionSerializationFilters() throws IOException { TransportVersion beforeFilterVersion = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_8_0_0, - TransportVersion.V_8_1_0 + TransportVersions.V_8_0_0, + TransportVersions.V_8_1_0 ); assertBWCSerialization(query, queryNoFilters, beforeFilterVersion); @@ -226,7 +227,7 @@ public void testBWCVersionSerializationSimilarity() throws IOException { query.numCands(), null ).queryName(query.queryName()).boost(query.boost()).addFilterQueries(query.filterQueries()); - assertBWCSerialization(query, queryNoSimilarity, TransportVersion.V_8_7_0); + assertBWCSerialization(query, queryNoSimilarity, TransportVersions.V_8_7_0); } public void testBWCVersionSerializationByteQuery() throws IOException { @@ -246,8 +247,8 @@ public void testBWCVersionSerializationByteQuery() throws 
IOException { TransportVersion beforeByteQueryVersion = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_8_2_0, - TransportVersion.V_8_6_0 + TransportVersions.V_8_2_0, + TransportVersions.V_8_6_0 ); assertBWCSerialization(query, queryNoByteQuery, beforeByteQueryVersion); } diff --git a/server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java b/server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java index 9fbac091faac6..a782560c672e9 100644 --- a/server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java +++ b/server/src/test/java/org/elasticsearch/search/vectors/TestQueryVectorBuilderPlugin.java @@ -58,7 +58,7 @@ static class TestQueryVectorBuilder implements QueryVectorBuilder { } TestQueryVectorBuilder(StreamInput in) throws IOException { - this.vectorToBuild = in.readList(StreamInput::readFloat); + this.vectorToBuild = in.readCollectionAsList(StreamInput::readFloat); } @Override diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java index 06368a51a9db5..67a34bf4a08e9 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotResiliencyTests.java @@ -120,6 +120,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.FakeThreadPoolMasterService; import org.elasticsearch.cluster.service.MasterService; +import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.RecyclerBytesStreamOutput; import org.elasticsearch.common.network.NetworkModule; @@ -2201,7 +2202,8 @@ public void start(ClusterState initialState) { new NoneCircuitBreakerService(), new Reconfigurator(clusterService.getSettings(), clusterService.getClusterSettings()), LeaderHeartbeatService.NO_OP, - StatefulPreVoteCollector::new + StatefulPreVoteCollector::new, + CompatibilityVersionsUtils.staticCurrent() ); masterService.setClusterStatePublisher(coordinator); coordinator.start(); diff --git a/server/src/test/java/org/elasticsearch/threadpool/ScheduleWithFixedDelayTests.java b/server/src/test/java/org/elasticsearch/threadpool/ScheduleWithFixedDelayTests.java index 2282827cae18f..ee7b929072bd1 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ScheduleWithFixedDelayTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/ScheduleWithFixedDelayTests.java @@ -11,18 +11,21 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.TimeValue; import org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.Scheduler.Cancellable; import org.elasticsearch.threadpool.Scheduler.ReschedulingRunnable; -import org.elasticsearch.threadpool.ThreadPool.Names; import org.junit.After; import org.junit.Before; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import 
java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -32,6 +35,7 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.sameInstance; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; @@ -70,20 +74,14 @@ public void testDoesNotRescheduleUntilExecutionFinished() throws Exception { Thread.currentThread().interrupt(); } }; - ReschedulingRunnable reschedulingRunnable = new ReschedulingRunnable( - runnable, - delay, - Names.GENERIC, - threadPool, - (e) -> {}, - (e) -> {} - ); + final Executor executor = mock(Executor.class); + ReschedulingRunnable reschedulingRunnable = new ReschedulingRunnable(runnable, delay, executor, threadPool, e -> {}, e -> {}); // not scheduled yet - verify(threadPool, never()).schedule(any(), any(), any()); + verify(threadPool, never()).schedule(any(), any(), any(Executor.class)); reschedulingRunnable.start(); // this call was made by start - verify(threadPool, times(1)).schedule(reschedulingRunnable, delay, Names.GENERIC); + verify(threadPool, times(1)).schedule(same(reschedulingRunnable), same(delay), same(executor)); // create a thread and start the runnable Thread runThread = new Thread() { @@ -103,7 +101,7 @@ public void run() { runThread.join(); // validate schedule was called again - verify(threadPool, times(2)).schedule(reschedulingRunnable, delay, Names.GENERIC); + verify(threadPool, times(2)).schedule(same(reschedulingRunnable), same(delay), same(executor)); } public void testThatRunnableIsRescheduled() throws Exception { @@ -120,7 +118,7 @@ public void testThatRunnableIsRescheduled() throws Exception { } }; - Cancellable cancellable = threadPool.scheduleWithFixedDelay(countingRunnable, TimeValue.timeValueMillis(10L), Names.GENERIC); + Cancellable cancellable = threadPool.scheduleWithFixedDelay(countingRunnable, TimeValue.timeValueMillis(10L), threadPool.generic()); assertNotNull(cancellable); // wait for the number of successful count down operations @@ -168,7 +166,7 @@ public void testCancellingRunnable() throws Exception { throw new RuntimeException("throw at end"); } }; - Cancellable cancellable = threadPool.scheduleWithFixedDelay(countingRunnable, TimeValue.timeValueMillis(10L), Names.GENERIC); + Cancellable cancellable = threadPool.scheduleWithFixedDelay(countingRunnable, TimeValue.timeValueMillis(10L), threadPool.generic()); cancellableRef.set(cancellable); // wait for the runnable to finish doneLatch.await(); @@ -202,7 +200,11 @@ public void testBlockingCallOnSchedulerThreadFails() throws Exception { } }; - Cancellable cancellable = threadPool.scheduleWithFixedDelay(runnable, TimeValue.timeValueMillis(10L), Names.SAME); + Cancellable cancellable = threadPool.scheduleWithFixedDelay( + runnable, + TimeValue.timeValueMillis(10L), + EsExecutors.DIRECT_EXECUTOR_SERVICE + ); Object resultingObject = resultsFuture.get(); assertNotNull(resultingObject); assertThat(resultingObject, instanceOf(Throwable.class)); @@ -235,7 +237,7 @@ public void testBlockingCallOnNonSchedulerThreadAllowed() throws Exception { } }; - final Cancellable cancellable = threadPool.scheduleWithFixedDelay(runnable, TimeValue.timeValueMillis(10L), Names.GENERIC); + final Cancellable cancellable = threadPool.scheduleWithFixedDelay(runnable, TimeValue.timeValueMillis(10L), threadPool.generic()); 
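[Editor's note] The thread-pool hunks above and below migrate test scheduling from ThreadPool.Names strings to java.util.concurrent.Executor arguments. A minimal sketch of the new idiom, assuming an Elasticsearch test classpath; the class name ScheduleIdiomSketch and the no-op tasks are illustrative only, not part of this change:

import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.node.Node;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;

public class ScheduleIdiomSketch {
    public static void main(String[] args) {
        ThreadPool threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "sketch").build());
        try {
            // Previously: threadPool.schedule(task, delay, ThreadPool.Names.GENERIC)
            Executor generic = threadPool.generic();
            threadPool.schedule(() -> {}, TimeValue.timeValueMillis(10), generic);
            // Previously ThreadPool.Names.SAME; now an explicit caller-thread executor
            threadPool.schedule(() -> {}, TimeValue.timeValueMillis(10), EsExecutors.DIRECT_EXECUTOR_SERVICE);
        } finally {
            ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
        }
    }
}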
assertFalse(resultsFuture.isDone()); final Object o = new Object(); @@ -251,7 +253,7 @@ public void testOnRejectionCausesCancellation() throws Exception { terminate(threadPool); threadPool = new ThreadPool(Settings.builder().put(Node.NODE_NAME_SETTING.getKey(), "fixed delay tests").build()) { @Override - public ScheduledCancellable schedule(Runnable command, TimeValue delay, String executor) { + public ScheduledCancellable schedule(Runnable command, TimeValue delay, Executor executor) { if (command instanceof ReschedulingRunnable) { ((ReschedulingRunnable) command).onRejection(new EsRejectedExecutionException()); } else { @@ -264,7 +266,7 @@ public ScheduledCancellable schedule(Runnable command, TimeValue delay, Executor e ReschedulingRunnable reschedulingRunnable = new ReschedulingRunnable( runnable, delay, - Names.GENERIC, + threadPool.generic(), threadPool, (e) -> {}, (e) -> {} @@ -274,24 +276,48 @@ public ScheduledCancellable schedule(Runnable command, TimeValue delay, Executor e } public void testRunnableDoesNotRunAfterCancellation() throws Exception { - final int iterations = scaledRandomIntBetween(2, 12); - final AtomicInteger counter = new AtomicInteger(); - final CountDownLatch doneLatch = new CountDownLatch(iterations); - final Runnable countingRunnable = () -> { + int iterations = scaledRandomIntBetween(2, 12); + + // we don't have the cancellable until we schedule the task, which needs the barrier object to reference in the closure + // so break the circular dependency here + AtomicReference<Runnable> checkCancel = new AtomicReference<>(); + + AtomicInteger counter = new AtomicInteger(); + CyclicBarrier barrier = new CyclicBarrier(2, () -> checkCancel.get().run()); + Runnable countingRunnable = () -> { counter.incrementAndGet(); - doneLatch.countDown(); + try { + barrier.await(); + } catch (Exception e) { + throw new AssertionError(e); + } }; - final TimeValue interval = TimeValue.timeValueMillis(50L); - final Cancellable cancellable = threadPool.scheduleWithFixedDelay(countingRunnable, interval, Names.GENERIC); - doneLatch.await(); - cancellable.cancel(); + TimeValue interval = TimeValue.timeValueMillis(50L); + Cancellable cancellable = threadPool.scheduleWithFixedDelay(countingRunnable, interval, threadPool.generic()); + checkCancel.set(new Runnable() { + private int remaining = iterations; + + @Override + public void run() { + if (--remaining == 0) { + cancellable.cancel(); + } + } + }); + + for (int i = 0; i < iterations; i++) { + barrier.await(); + } + expectThrows(TimeoutException.class, () -> barrier.await(2 * interval.millis(), TimeUnit.MILLISECONDS)); - final int counterValue = counter.get(); - assertThat(counterValue, equalTo(iterations)); + assertThat(counter.get(), equalTo(iterations)); if (rarely()) { - assertBusy(() -> assertThat(counter.get(), equalTo(iterations)), 5 * interval.millis(), TimeUnit.MILLISECONDS); + assertBusy(() -> { + expectThrows(TimeoutException.class, () -> barrier.await(interval.millis(), TimeUnit.MILLISECONDS)); + assertThat(counter.get(), equalTo(iterations)); + }, 5 * interval.millis(), TimeUnit.MILLISECONDS); } } } diff --git a/server/src/test/java/org/elasticsearch/threadpool/SchedulerTests.java b/server/src/test/java/org/elasticsearch/threadpool/SchedulerTests.java index 3fb31bb12ad9e..3c682e572ae6d 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/SchedulerTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/SchedulerTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.threadpool; import
org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; import org.hamcrest.Matchers; @@ -39,7 +40,11 @@ public void testCancelOnThreadPool() { } private void scheduleAndCancel(ThreadPool threadPool, AtomicLong executed, String type) { - Scheduler.ScheduledCancellable scheduled = threadPool.schedule(executed::incrementAndGet, TimeValue.timeValueSeconds(20), type); + Scheduler.ScheduledCancellable scheduled = threadPool.schedule( + executed::incrementAndGet, + TimeValue.timeValueSeconds(20), + threadPool.executor(type) + ); assertEquals(1, schedulerQueueSize(threadPool)); assertFalse(scheduled.isCancelled()); assertTrue(scheduled.cancel()); @@ -53,16 +58,13 @@ private int schedulerQueueSize(ThreadPool threadPool) { public void testCancelOnScheduler() { ScheduledThreadPoolExecutor executor = Scheduler.initScheduler(Settings.EMPTY, "test-scheduler"); - Scheduler scheduler = (command, delay, name) -> Scheduler.wrapAsScheduledCancellable( executor.schedule(command, delay.millis(), TimeUnit.MILLISECONDS) ); - + Scheduler scheduler = new ScheduledExecutorServiceScheduler(executor); AtomicLong executed = new AtomicLong(); try { Scheduler.ScheduledCancellable scheduled = scheduler.schedule( executed::incrementAndGet, TimeValue.timeValueSeconds(20), - ThreadPool.Names.SAME + EsExecutors.DIRECT_EXECUTOR_SERVICE ); assertEquals(1, executor.getQueue().size()); assertFalse(scheduled.isCancelled()); @@ -79,7 +81,7 @@ public void testDelay() throws InterruptedException { ThreadPool threadPool = new TestThreadPool("test"); try { List<Scheduler.ScheduledCancellable> jobs = LongStream.range(20, 30) - .mapToObj(delay -> threadPool.schedule(() -> {}, TimeValue.timeValueSeconds(delay), ThreadPool.Names.SAME)) + .mapToObj(delay -> threadPool.schedule(() -> {}, TimeValue.timeValueSeconds(delay), EsExecutors.DIRECT_EXECUTOR_SERVICE)) .collect(Collectors.toCollection(ArrayList::new)); Collections.reverse(jobs); @@ -120,7 +122,13 @@ public void testScheduledOnThreadPool() throws InterruptedException { CountDownLatch missingExecutions = new CountDownLatch(ThreadPool.THREAD_POOL_TYPES.keySet().size()); try { ThreadPool.THREAD_POOL_TYPES.keySet() - .forEach(type -> threadPool.schedule(missingExecutions::countDown, TimeValue.timeValueMillis(randomInt(5)), type)); + .forEach( + type -> threadPool.schedule( + missingExecutions::countDown, + TimeValue.timeValueMillis(randomInt(5)), + threadPool.executor(type) + ) + ); assertTrue(missingExecutions.await(30, TimeUnit.SECONDS)); } finally { @@ -132,16 +140,14 @@ public void testScheduledOnScheduler() throws InterruptedException { final String schedulerName = "test-scheduler"; ScheduledThreadPoolExecutor executor = Scheduler.initScheduler(Settings.EMPTY, schedulerName); - Scheduler scheduler = (command, delay, name) -> Scheduler.wrapAsScheduledCancellable( executor.schedule(command, delay.millis(), TimeUnit.MILLISECONDS) ); + Scheduler scheduler = new ScheduledExecutorServiceScheduler(executor); CountDownLatch missingExecutions = new CountDownLatch(1); try { scheduler.schedule(() -> { assertThat(Thread.currentThread().getName(), containsString("[" + schedulerName + "]")); missingExecutions.countDown(); - }, TimeValue.timeValueMillis(randomInt(5)), ThreadPool.Names.SAME); + }, TimeValue.timeValueMillis(randomInt(5)), EsExecutors.DIRECT_EXECUTOR_SERVICE); assertTrue(missingExecutions.await(30,
TimeUnit.SECONDS)); } finally { Scheduler.terminate(executor, 10, TimeUnit.SECONDS); diff --git a/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java b/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java index 36b719bd97b4a..baa03ddf1abcd 100644 --- a/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java +++ b/server/src/test/java/org/elasticsearch/threadpool/ThreadPoolTests.java @@ -265,7 +265,7 @@ public void testInheritContextOnSchedule() throws InterruptedException { assertNull(threadPool.getThreadContext().getHeader("bar")); assertNull(threadPool.getThreadContext().getTransient("bar")); executed.countDown(); - }, TimeValue.timeValueMillis(randomInt(100)), randomFrom(ThreadPool.Names.SAME, ThreadPool.Names.GENERIC)); + }, TimeValue.timeValueMillis(randomInt(100)), randomFrom(EsExecutors.DIRECT_EXECUTOR_SERVICE, threadPool.generic())); threadPool.getThreadContext().putTransient("bar", "boom"); threadPool.getThreadContext().putHeader("bar", "boom"); latch.countDown(); @@ -310,7 +310,7 @@ public String toString() { return "slow-test-task"; } }; - threadPool.schedule(runnable, TimeValue.timeValueMillis(randomLongBetween(0, 300)), ThreadPool.Names.SAME); + threadPool.schedule(runnable, TimeValue.timeValueMillis(randomLongBetween(0, 300)), EsExecutors.DIRECT_EXECUTOR_SERVICE); assertBusy(appender::assertAllExpectationsMatched); } finally { Loggers.removeAppender(logger, appender); diff --git a/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java b/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java index 22bb64d1fa86b..f243a894a8f17 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundDecoderTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.transport; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.bytes.ReleasableBytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -18,10 +19,12 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.TransportVersionUtils; +import org.elasticsearch.transport.InboundDecoder.ChannelType; import java.io.IOException; import java.util.ArrayList; +import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.hasItems; import static org.hamcrest.Matchers.instanceOf; @@ -218,6 +221,94 @@ public void testDecodeHandshakeCompatibility() throws IOException { } + public void testClientChannelTypeFailsDecodingRequests() throws Exception { + String action = "test-request"; + long requestId = randomNonNegativeLong(); + if (randomBoolean()) { + final String headerKey = randomAlphaOfLength(10); + final String headerValue = randomAlphaOfLength(20); + if (randomBoolean()) { + threadContext.putHeader(headerKey, headerValue); + } else { + threadContext.addResponseHeader(headerKey, headerValue); + } + } + // a request + OutboundMessage message = new OutboundMessage.Request( + threadContext, + new TestRequest(randomAlphaOfLength(100)), + TransportHandshaker.REQUEST_HANDSHAKE_VERSION, + action, + requestId, + randomBoolean(), + randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4, null) + ); + + try (RecyclerBytesStreamOutput os = new RecyclerBytesStreamOutput(recycler)) { + final BytesReference bytes = 
message.serialize(os); + try (InboundDecoder clientDecoder = new InboundDecoder(recycler, ChannelType.CLIENT)) { + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> clientDecoder.decode(ReleasableBytesReference.wrap(bytes), ignored -> {}) + ); + assertThat(e.getMessage(), containsString("client channels do not accept inbound requests, only responses")); + } + // the same message will be decoded by a server or mixed decoder + try (InboundDecoder decoder = new InboundDecoder(recycler, randomFrom(ChannelType.SERVER, ChannelType.MIX))) { + final ArrayList<Object> fragments = new ArrayList<>(); + int bytesConsumed = decoder.decode(ReleasableBytesReference.wrap(bytes), fragments::add); + int totalHeaderSize = TcpHeader.headerSize(TransportVersion.current()) + bytes.getInt( + TcpHeader.VARIABLE_HEADER_SIZE_POSITION + ); + assertEquals(totalHeaderSize, bytesConsumed); + final Header header = (Header) fragments.get(0); + assertEquals(requestId, header.getRequestId()); + } + } + } + + public void testServerChannelTypeFailsDecodingResponses() throws Exception { + long requestId = randomNonNegativeLong(); + if (randomBoolean()) { + final String headerKey = randomAlphaOfLength(10); + final String headerValue = randomAlphaOfLength(20); + if (randomBoolean()) { + threadContext.putHeader(headerKey, headerValue); + } else { + threadContext.addResponseHeader(headerKey, headerValue); + } + } + // a response + OutboundMessage message = new OutboundMessage.Response( + threadContext, + new TestResponse(randomAlphaOfLength(100)), + TransportHandshaker.REQUEST_HANDSHAKE_VERSION, + requestId, + randomBoolean(), + randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4, null) + ); + + try (RecyclerBytesStreamOutput os = new RecyclerBytesStreamOutput(recycler)) { + final BytesReference bytes = message.serialize(os); + try (InboundDecoder decoder = new InboundDecoder(recycler, ChannelType.SERVER)) { + final ReleasableBytesReference releasable1 = ReleasableBytesReference.wrap(bytes); + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> decoder.decode(releasable1, ignored -> {})); + assertThat(e.getMessage(), containsString("server channels do not accept inbound responses, only requests")); + } + // the same message will be decoded by a client or mixed decoder + try (InboundDecoder decoder = new InboundDecoder(recycler, randomFrom(ChannelType.CLIENT, ChannelType.MIX))) { + final ArrayList<Object> fragments = new ArrayList<>(); + int bytesConsumed = decoder.decode(ReleasableBytesReference.wrap(bytes), fragments::add); + int totalHeaderSize = TcpHeader.headerSize(TransportVersion.current()) + bytes.getInt( + TcpHeader.VARIABLE_HEADER_SIZE_POSITION + ); + assertEquals(totalHeaderSize, bytesConsumed); + final Header header = (Header) fragments.get(0); + assertEquals(requestId, header.getRequestId()); + } + } + } + public void testCompressedDecode() throws IOException { boolean isRequest = randomBoolean(); String action = "test-request"; @@ -343,7 +434,7 @@ public void testCompressedDecodeHandshakeCompatibility() throws IOException { public void testVersionIncompatibilityDecodeException() throws IOException { String action = "test-request"; long requestId = randomNonNegativeLong(); - TransportVersion incompatibleVersion = TransportVersionUtils.getPreviousVersion(TransportVersion.MINIMUM_COMPATIBLE); + TransportVersion incompatibleVersion = TransportVersionUtils.getPreviousVersion(TransportVersions.MINIMUM_COMPATIBLE); OutboundMessage message = new
OutboundMessage.Request( threadContext, new TestRequest(randomAlphaOfLength(100)), @@ -372,13 +463,13 @@ public void testVersionIncompatibilityDecodeException() throws IOException { public void testCheckVersionCompatibility() { try { InboundDecoder.checkVersionCompatibility( - TransportVersionUtils.randomVersionBetween(random(), TransportVersion.MINIMUM_COMPATIBLE, TransportVersion.current()) + TransportVersionUtils.randomVersionBetween(random(), TransportVersions.MINIMUM_COMPATIBLE, TransportVersion.current()) ); } catch (IllegalStateException e) { throw new AssertionError(e); } - TransportVersion invalid = TransportVersionUtils.getPreviousVersion(TransportVersion.MINIMUM_COMPATIBLE); + TransportVersion invalid = TransportVersionUtils.getPreviousVersion(TransportVersions.MINIMUM_COMPATIBLE); try { InboundDecoder.checkVersionCompatibility(invalid); fail(); @@ -387,7 +478,7 @@ public void testCheckVersionCompatibility() { "Received message from unsupported version: [" + invalid + "] minimal compatible version is: [" - + TransportVersion.MINIMUM_COMPATIBLE + + TransportVersions.MINIMUM_COMPATIBLE + "]", expected.getMessage() ); diff --git a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java index 5ed1a2bfee7ea..bd4be3f3bf49c 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundHandlerTests.java @@ -13,6 +13,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -26,6 +27,7 @@ import org.elasticsearch.common.network.HandlingTimeTracker; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.tasks.TaskId; @@ -111,7 +113,7 @@ public void testPing() throws Exception { TestRequest::new, taskManager, (request, channel, task) -> channelCaptor.set(channel), - ThreadPool.Names.SAME, + EsExecutors.DIRECT_EXECUTOR_SERVICE, false, true, Tracer.NOOP @@ -164,7 +166,7 @@ public TestResponse read(StreamInput in) throws IOException { channelCaptor.set(channel); requestCaptor.set(request); }, - ThreadPool.Names.SAME, + EsExecutors.DIRECT_EXECUTOR_SERVICE, false, true, Tracer.NOOP @@ -251,7 +253,7 @@ public void testClosesChannelOnErrorInHandshake() throws Exception { final TransportVersion remoteVersion = TransportVersionUtils.randomVersionBetween( random(), TransportVersionUtils.getFirstVersion(), - TransportVersionUtils.getPreviousVersion(TransportVersion.MINIMUM_COMPATIBLE) + TransportVersionUtils.getPreviousVersion(TransportVersions.MINIMUM_COMPATIBLE) ); final long requestId = randomNonNegativeLong(); final Header requestHeader = new Header( diff --git a/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java b/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java index b0ed33da16766..19f1c4d90c20a 100644 --- a/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java +++ b/server/src/test/java/org/elasticsearch/transport/InboundPipelineTests.java @@ -9,6 +9,7 @@ 
package org.elasticsearch.transport; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.common.breaker.NoopCircuitBreaker; @@ -98,7 +99,7 @@ public void testPipelineHandling() throws IOException { toRelease.clear(); try (RecyclerBytesStreamOutput streamOutput = new RecyclerBytesStreamOutput(recycler)) { while (streamOutput.size() < BYTE_THRESHOLD) { - final TransportVersion version = randomFrom(TransportVersion.current(), TransportVersion.MINIMUM_COMPATIBLE); + final TransportVersion version = randomFrom(TransportVersion.current(), TransportVersions.MINIMUM_COMPATIBLE); final String value = randomRealisticUnicodeOfCodepointLength(randomIntBetween(200, 400)); final boolean isRequest = randomBoolean(); Compression.Scheme compressionScheme = getCompressionScheme(version); @@ -215,7 +216,7 @@ public void testDecodeExceptionIsPropagated() throws IOException { try (RecyclerBytesStreamOutput streamOutput = new RecyclerBytesStreamOutput(recycler)) { String actionName = "actionName"; - final TransportVersion invalidVersion = TransportVersionUtils.getPreviousVersion(TransportVersion.MINIMUM_COMPATIBLE); + final TransportVersion invalidVersion = TransportVersionUtils.getPreviousVersion(TransportVersions.MINIMUM_COMPATIBLE); final String value = randomAlphaOfLength(1000); final boolean isRequest = randomBoolean(); final long requestId = randomNonNegativeLong(); diff --git a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java index 8cab2c2a0e2eb..d2941bab3f91a 100644 --- a/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/ProxyConnectionStrategyTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterName; @@ -237,7 +238,7 @@ public void testConnectFailsWithIncompatibleNodes() { IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current() ); - TransportVersion incompatibleTransportVersion = TransportVersionUtils.getPreviousVersion(TransportVersion.MINIMUM_COMPATIBLE); + TransportVersion incompatibleTransportVersion = TransportVersionUtils.getPreviousVersion(TransportVersions.MINIMUM_COMPATIBLE); try (MockTransportService transport1 = startTransport("incompatible-node", incompatibleVersion, incompatibleTransportVersion)) { TransportAddress address1 = transport1.boundAddress().publishAddress(); diff --git a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java index ffcbf306417c9..bc5709c77b74d 100644 --- a/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/transport/RemoteClusterServiceTests.java @@ -7,6 +7,7 @@ */ package org.elasticsearch.transport; +import org.apache.logging.log4j.Level; import org.elasticsearch.TransportVersion; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.OriginalIndices; @@ -26,6 +27,7 @@ import org.elasticsearch.core.TimeValue; import 
org.elasticsearch.node.Node; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.MockLogAppender; import org.elasticsearch.test.transport.MockTransportService; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -44,6 +46,7 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.function.BiFunction; +import static org.elasticsearch.test.MockLogAppender.assertThatLogger; import static org.elasticsearch.test.NodeRoles.masterOnlyNode; import static org.elasticsearch.test.NodeRoles.nonMasterNode; import static org.elasticsearch.test.NodeRoles.onlyRoles; @@ -863,7 +866,7 @@ public void testRemoteNodeRoles() throws IOException, InterruptedException { } } - private ActionListener connectionListener(final CountDownLatch latch) { + private ActionListener connectionListener(final CountDownLatch latch) { return ActionTestUtils.assertNoFailureListener(x -> latch.countDown()); } @@ -1423,6 +1426,53 @@ public void testUseDifferentTransportProfileForCredentialsProtectedRemoteCluster } } + public void testLogsConnectionResult() throws IOException { + + try ( + var remote = startTransport("remote", List.of(), VersionInformation.CURRENT, TransportVersion.current(), Settings.EMPTY); + var local = startTransport("local", List.of(), VersionInformation.CURRENT, TransportVersion.current(), Settings.EMPTY); + var remoteClusterService = new RemoteClusterService(Settings.EMPTY, local) + ) { + var clusterSettings = ClusterSettings.createBuiltInClusterSettings(); + remoteClusterService.listenForUpdates(clusterSettings); + + assertThatLogger( + () -> clusterSettings.applySettings( + Settings.builder().putList("cluster.remote.remote_1.seeds", remote.getLocalDiscoNode().getAddress().toString()).build() + ), + RemoteClusterService.class, + new MockLogAppender.SeenEventExpectation( + "Should log when connecting to remote", + RemoteClusterService.class.getCanonicalName(), + Level.INFO, + "remote cluster connection [remote_1] updated: CONNECTED" + ) + ); + + assertThatLogger( + () -> clusterSettings.applySettings(Settings.EMPTY), + RemoteClusterService.class, + new MockLogAppender.SeenEventExpectation( + "Should log when disconnecting from remote", + RemoteClusterService.class.getCanonicalName(), + Level.INFO, + "remote cluster connection [remote_1] updated: DISCONNECTED" + ) + ); + + assertThatLogger( + () -> clusterSettings.applySettings(Settings.builder().put(randomIdentifier(), randomIdentifier()).build()), + RemoteClusterService.class, + new MockLogAppender.UnseenEventExpectation( + "Should not log when changing unrelated setting", + RemoteClusterService.class.getCanonicalName(), + Level.INFO, + "*" + ) + ); + } + } + private static Settings createSettings(String clusterAlias, List seeds) { Settings.Builder builder = Settings.builder(); builder.put( diff --git a/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java b/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java index ac3161af4f650..577f5d2c7dc18 100644 --- a/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/SniffConnectionStrategyTests.java @@ -9,6 +9,7 @@ package org.elasticsearch.transport; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.action.admin.cluster.remote.RemoteClusterNodesAction; import 
org.elasticsearch.action.admin.cluster.state.ClusterStateAction; @@ -375,7 +376,7 @@ public void testDiscoverWithSingleIncompatibleSeedNode() { IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current() ); - TransportVersion incompatibleTransportVersion = TransportVersionUtils.getPreviousVersion(TransportVersion.MINIMUM_COMPATIBLE); + TransportVersion incompatibleTransportVersion = TransportVersionUtils.getPreviousVersion(TransportVersions.MINIMUM_COMPATIBLE); try ( MockTransportService seedTransport = startTransport( "seed_node", @@ -454,7 +455,7 @@ public void testConnectFailsWithIncompatibleNodes() { IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current() ); - TransportVersion incompatibleTransportVersion = TransportVersionUtils.getPreviousVersion(TransportVersion.MINIMUM_COMPATIBLE); + TransportVersion incompatibleTransportVersion = TransportVersionUtils.getPreviousVersion(TransportVersions.MINIMUM_COMPATIBLE); try ( MockTransportService incompatibleSeedTransport = startTransport( "seed_node", diff --git a/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java b/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java index 89665ba6dba2e..704de4fb2276a 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportActionProxyTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.VersionInformation; @@ -54,7 +55,7 @@ public class TransportActionProxyTests extends ESTestCase { IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current() ); - protected static final TransportVersion transportVersion0 = TransportVersion.MINIMUM_COMPATIBLE; + protected static final TransportVersion transportVersion0 = TransportVersions.MINIMUM_COMPATIBLE; protected DiscoveryNode nodeA; protected MockTransportService serviceA; diff --git a/server/src/test/java/org/elasticsearch/transport/TransportKeepAliveTests.java b/server/src/test/java/org/elasticsearch/transport/TransportKeepAliveTests.java index 24a5487687882..08bdf6f31d24c 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportKeepAliveTests.java +++ b/server/src/test/java/org/elasticsearch/transport/TransportKeepAliveTests.java @@ -21,6 +21,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.Deque; +import java.util.concurrent.Executor; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; @@ -190,7 +191,11 @@ private CapturingThreadPool() { } @Override - public ScheduledCancellable schedule(Runnable task, TimeValue delay, String executor) { + public ScheduledCancellable schedule(Runnable task, TimeValue delay, Executor executor) { + return doSchedule(task, delay); + } + + private ScheduledCancellable doSchedule(Runnable task, TimeValue delay) { scheduledTasks.add(new Tuple<>(delay, task)); return null; } diff --git a/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java b/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java index 09ba175ba16db..ad6e33afa2559 100644 --- a/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java +++ 
b/server/src/test/java/org/elasticsearch/transport/TransportServiceHandshakeTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.Build; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -40,6 +41,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import static java.util.Collections.emptyMap; @@ -207,7 +209,7 @@ public void testIncompatibleNodeVersions() { TransportService transportServiceB = startServices( "TS_B", settings, - TransportVersion.MINIMUM_COMPATIBLE, + TransportVersions.MINIMUM_COMPATIBLE, new VersionInformation( VersionUtils.getPreviousVersion(Version.CURRENT.minimumCompatibilityVersion()), IndexVersion.MINIMUM_COMPATIBLE, @@ -258,7 +260,7 @@ public void testIncompatibleTransportVersions() { TransportService transportServiceB = startServices( "TS_B", settings, - TransportVersionUtils.getPreviousVersion(TransportVersion.MINIMUM_COMPATIBLE), + TransportVersionUtils.getPreviousVersion(TransportVersions.MINIMUM_COMPATIBLE), new VersionInformation(Version.CURRENT.minimumCompatibilityVersion(), IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current()), TransportService.NOOP_TRANSPORT_INTERCEPTOR ); @@ -407,7 +409,7 @@ public void testAcceptsMismatchedBuildHashFromDifferentVersion() { final TransportService transportServiceB = startServices( "TS_B", Settings.builder().put("cluster.name", "a").build(), - TransportVersion.MINIMUM_COMPATIBLE, + TransportVersions.MINIMUM_COMPATIBLE, new VersionInformation(Version.CURRENT.minimumCompatibilityVersion(), IndexVersion.MINIMUM_COMPATIBLE, IndexVersion.current()), transportInterceptorB ); @@ -426,7 +428,7 @@ public void setModifyBuildHash(boolean modifyBuildHash) { @Override public TransportRequestHandler interceptHandler( String action, - String executor, + Executor executor, boolean forceExecution, TransportRequestHandler actualHandler ) { diff --git a/settings.gradle b/settings.gradle index fbaf783d76aa7..09aaef7ede189 100644 --- a/settings.gradle +++ b/settings.gradle @@ -14,7 +14,7 @@ pluginManagement { } plugins { - id "com.gradle.enterprise" version "3.13.1" + id "com.gradle.enterprise" version "3.14.1" id 'elasticsearch.java-toolchain' } diff --git a/test/external-modules/delayed-aggs/src/main/java/org/elasticsearch/test/delayedshard/DelayedShardAggregationBuilder.java b/test/external-modules/delayed-aggs/src/main/java/org/elasticsearch/test/delayedshard/DelayedShardAggregationBuilder.java index 0549d1b3053b2..06985c659ecbd 100644 --- a/test/external-modules/delayed-aggs/src/main/java/org/elasticsearch/test/delayedshard/DelayedShardAggregationBuilder.java +++ b/test/external-modules/delayed-aggs/src/main/java/org/elasticsearch/test/delayedshard/DelayedShardAggregationBuilder.java @@ -9,6 +9,7 @@ package org.elasticsearch.test.delayedshard; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.TimeValue; @@ -134,6 +135,6 @@ public int hashCode() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_10_0; + return TransportVersions.V_7_10_0; } } diff --git 
a/test/external-modules/die-with-dignity/src/javaRestTest/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java b/test/external-modules/die-with-dignity/src/javaRestTest/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java index 6d83424724d82..a39d8a521474f 100644 --- a/test/external-modules/die-with-dignity/src/javaRestTest/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java +++ b/test/external-modules/die-with-dignity/src/javaRestTest/java/org/elasticsearch/qa/die_with_dignity/DieWithDignityIT.java @@ -8,7 +8,9 @@ package org.elasticsearch.qa.die_with_dignity; +import org.apache.lucene.util.Constants; import org.elasticsearch.client.Request; +import org.elasticsearch.client.Response; import org.elasticsearch.core.PathUtils; import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.cluster.LogType; @@ -25,6 +27,8 @@ import java.nio.file.Files; import java.nio.file.Path; import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.not; @@ -36,7 +40,7 @@ public class DieWithDignityIT extends ESRestTestCase { .distribution(DistributionType.INTEG_TEST) .module("test-die-with-dignity") .setting("xpack.security.enabled", "false") - .environment("CLI_JAVA_OPTS", "-Ddie.with.dignity.test=true") + .jvmArg("-Ddie.with.dignity.test=true") .jvmArg("-XX:-ExitOnOutOfMemoryError") .build(); @@ -45,9 +49,8 @@ protected String getTestRestCluster() { return cluster.getHttpAddresses(); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/97789") public void testDieWithDignity() throws Exception { - final long pid = cluster.getPid(0); + final long pid = getElasticsearchPid(); assertJvmArgs(pid, containsString("-Ddie.with.dignity.test=true")); expectThrows(IOException.class, () -> client().performRequest(new Request("GET", "/_die_with_dignity"))); @@ -75,11 +78,26 @@ public void testDieWithDignity() throws Exception { assertTrue(fatalErrorInThreadExiting); } - private void assertJvmArgs(long pid, Matcher matcher) throws IOException { + private Process startJcmd(long pid) throws IOException { final String jcmdPath = PathUtils.get(System.getProperty("tests.runtime.java"), "bin/jcmd").toString(); - final Process jcmdProcess = new ProcessBuilder().command(jcmdPath, Long.toString(pid), "VM.command_line") - .redirectErrorStream(true) - .start(); + return new ProcessBuilder().command(jcmdPath, Long.toString(pid), "VM.command_line").redirectErrorStream(true).start(); + } + + private void assertJvmArgs(long pid, Matcher matcher) throws IOException, InterruptedException { + Process jcmdProcess = startJcmd(pid); + + if (Constants.WINDOWS) { + // jcmd on windows appears to have a subtle bug where if the process being connected to + // dies while jcmd is running, it can hang indefinitely. 
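Distilled from the workaround this test adds: bound the wait on an attach-style subprocess, then kill and relaunch it once if it does not exit in time. The standalone runner below is a sketch that assumes jcmd is on the PATH and inspects the current JVM; the 10-second bound and single retry mirror the test code:

```java
import java.io.IOException;
import java.util.List;
import java.util.concurrent.TimeUnit;

public class JcmdWithRetry {

    private static Process startJcmd(long pid) throws IOException {
        // Fold stderr into stdout so a single stream captures all output.
        return new ProcessBuilder("jcmd", Long.toString(pid), "VM.command_line").redirectErrorStream(true).start();
    }

    static List<String> vmCommandLine(long pid) throws IOException, InterruptedException {
        Process jcmd = startJcmd(pid);
        // jcmd can hang if the target dies mid-attach, so wait with a bound...
        if (jcmd.waitFor(10, TimeUnit.SECONDS) == false) {
            jcmd.destroyForcibly();   // ...kill the stuck process...
            jcmd = startJcmd(pid);    // ...and retry once, as the test does.
        }
        try (var reader = jcmd.inputReader()) {
            return reader.lines().toList();
        }
    }

    public static void main(String[] args) throws Exception {
        // Inspect our own JVM as a self-contained demo.
        vmCommandLine(ProcessHandle.current().pid()).forEach(System.out::println);
    }
}
```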
Here we detect this case by + // waiting a fixed amount of time, and then killing/retrying the process + boolean exited = jcmdProcess.waitFor(10, TimeUnit.SECONDS); + if (exited == false) { + logger.warn("jcmd hung, killing process and retrying"); + jcmdProcess.destroyForcibly(); + jcmdProcess = startJcmd(pid); + } + } + List outputLines = readLines(jcmdProcess.getInputStream()); String jvmArgs = null; @@ -99,6 +117,18 @@ private void assertJvmArgs(long pid, Matcher matcher) throws IOException } } + private long getElasticsearchPid() throws IOException { + Response response = client().performRequest(new Request("GET", "/_nodes/process")); + @SuppressWarnings("unchecked") + var nodesInfo = (Map) entityAsMap(response).get("nodes"); + @SuppressWarnings("unchecked") + var nodeInfo = (Map) nodesInfo.values().iterator().next(); + @SuppressWarnings("unchecked") + var processInfo = (Map) nodeInfo.get("process"); + Object stringPid = processInfo.get("id"); + return Long.parseLong(stringPid.toString()); + } + private List readLines(InputStream is) throws IOException { try (BufferedReader in = new BufferedReader(new InputStreamReader(is, StandardCharsets.UTF_8))) { return in.lines().toList(); diff --git a/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/ErrorQueryBuilder.java b/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/ErrorQueryBuilder.java index 39a8191453686..1a424c54821e8 100644 --- a/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/ErrorQueryBuilder.java +++ b/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/ErrorQueryBuilder.java @@ -15,6 +15,7 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.HeaderWarning; @@ -90,12 +91,12 @@ public ErrorQueryBuilder(List indices) { public ErrorQueryBuilder(StreamInput in) throws IOException { super(in); - this.indices = in.readList(IndexError::new); + this.indices = in.readCollectionAsList(IndexError::new); } @Override protected void doWriteTo(StreamOutput out) throws IOException { - out.writeList(indices); + out.writeCollection(indices); } @Override @@ -164,7 +165,7 @@ protected int doHashCode() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } static void sleep(long millis) { diff --git a/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/IndexError.java b/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/IndexError.java index 8f7dc633e122d..19284152efab6 100644 --- a/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/IndexError.java +++ b/test/external-modules/error-query/src/main/java/org/elasticsearch/test/errorquery/IndexError.java @@ -8,7 +8,7 @@ package org.elasticsearch.test.errorquery; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -53,7 +53,7 @@ public IndexError(StreamInput in) throws IOException { this.shardIds = in.readBoolean() ? 
in.readIntArray() : null; this.errorType = in.readEnum(ERROR_TYPE.class); this.message = in.readString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_051)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_051)) { this.stallTimeSeconds = in.readVInt(); } else { this.stallTimeSeconds = 0; @@ -69,7 +69,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeEnum(errorType); out.writeString(message); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_051)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_051)) { out.writeVInt(stallTimeSeconds); } } diff --git a/test/external-modules/latency-simulating-directory/src/main/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingBlobStoreRepository.java b/test/external-modules/latency-simulating-directory/src/main/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingBlobStoreRepository.java index 873577136cc23..c184dca3887bd 100644 --- a/test/external-modules/latency-simulating-directory/src/main/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingBlobStoreRepository.java +++ b/test/external-modules/latency-simulating-directory/src/main/java/org/elasticsearch/test/simulatedlatencyrepo/LatencySimulatingBlobStoreRepository.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.io.InputStream; +import java.util.Iterator; class LatencySimulatingBlobStoreRepository extends FsRepository { @@ -50,6 +51,11 @@ public BlobContainer blobContainer(BlobPath path) { return new LatencySimulatingBlobContainer(blobContainer); } + @Override + public void deleteBlobsIgnoringIfNotExists(Iterator blobNames) throws IOException { + fsBlobStore.deleteBlobsIgnoringIfNotExists(blobNames); + } + @Override public void close() throws IOException { fsBlobStore.close(); diff --git a/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/NodeSeekStats.java b/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/NodeSeekStats.java index db743c1d976e1..8b2d95c3cf57e 100644 --- a/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/NodeSeekStats.java +++ b/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/NodeSeekStats.java @@ -31,13 +31,13 @@ public NodeSeekStats(DiscoveryNode node, Map> seeks public NodeSeekStats(StreamInput in) throws IOException { super(in); - this.seeks = in.readMap(s -> s.readList(ShardSeekStats::new)); + this.seeks = in.readMap(s -> s.readCollectionAsList(ShardSeekStats::new)); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeMap(seeks, StreamOutput::writeString, StreamOutput::writeList); + out.writeMap(seeks, StreamOutput::writeCollection); } @Override diff --git a/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/SeekStatsResponse.java b/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/SeekStatsResponse.java index 8696c19b76220..c07d4bf18e603 100644 --- a/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/SeekStatsResponse.java +++ b/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/SeekStatsResponse.java @@ -36,12 +36,12 @@ public SeekStatsResponse(StreamInput in) throws IOException { @Override protected List 
readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeSeekStats::new); + return in.readCollectionAsList(NodeSeekStats::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } @Override diff --git a/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/ShardSeekStats.java b/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/ShardSeekStats.java index 702d648d1c34d..1f904c0807fb4 100644 --- a/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/ShardSeekStats.java +++ b/test/external-modules/seek-tracking-directory/src/main/java/org/elasticsearch/test/seektracker/ShardSeekStats.java @@ -26,7 +26,7 @@ public ShardSeekStats(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(this.shard); - out.writeMap(this.seeksPerFile, StreamOutput::writeString, StreamOutput::writeLong); + out.writeMap(this.seeksPerFile, StreamOutput::writeLong); } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/KnownTransportVersions.java b/test/framework/src/main/java/org/elasticsearch/KnownTransportVersions.java index fb1342397b0d6..eecf9f7c6e851 100644 --- a/test/framework/src/main/java/org/elasticsearch/KnownTransportVersions.java +++ b/test/framework/src/main/java/org/elasticsearch/KnownTransportVersions.java @@ -17,5 +17,5 @@ public class KnownTransportVersions { /** * A sorted list of all known transport versions */ - public static final List ALL_VERSIONS = List.copyOf(TransportVersion.getAllVersions()); + public static final List ALL_VERSIONS = List.copyOf(TransportVersions.getAllVersions()); } diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java index a815a2eb9f28b..08ae0b028e5c7 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/coordination/AbstractCoordinatorTestCase.java @@ -42,6 +42,7 @@ import org.elasticsearch.cluster.service.ClusterApplierService; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.FakeThreadPoolMasterService; +import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; @@ -1162,7 +1163,8 @@ public RecyclerBytesStreamOutput newNetworkBytesStream() { new NoneCircuitBreakerService(), coordinationServices.getReconfigurator(), coordinationServices.getLeaderHeartbeatService(), - coordinationServices.getPreVoteCollectorFactory() + coordinationServices.getPreVoteCollectorFactory(), + CompatibilityVersionsUtils.staticCurrent() ); coordinationDiagnosticsService = new CoordinationDiagnosticsService( clusterService, diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersionsUtils.java b/test/framework/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersionsUtils.java new file mode 100644 index 0000000000000..80815dffa4a30 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/cluster/version/CompatibilityVersionsUtils.java @@ -0,0 +1,39 @@ +/* + * 
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.version; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.test.TransportVersionUtils; + +public class CompatibilityVersionsUtils { + + /** + * Current compatibility versions that can be determined statically + * + *
<p>
Some of our compatibility versions may be constructed at runtime, but in + * many tests those will not be needed. This utility method returns only the "current" + * values for statically defined versions, like {@link TransportVersion#current()}. + * + * @return Compatibility versions known at compile time. + */ + public static CompatibilityVersions staticCurrent() { + return new CompatibilityVersions(TransportVersion.current()); + } + + /** + * Random versions of values that can be chosen statically (as opposed to those + * that are loaded from plugins at startup time). + * + *
<p>
Like {@link #staticCurrent()}, but with random valid versions. + * @return Random valid compatibility versions + */ + public static CompatibilityVersions staticRandom() { + return new CompatibilityVersions(TransportVersionUtils.randomVersion()); + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueue.java b/test/framework/src/main/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueue.java index 0510ed59927d8..96b3510ed6afe 100644 --- a/test/framework/src/main/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueue.java +++ b/test/framework/src/main/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueue.java @@ -28,6 +28,7 @@ import java.util.Random; import java.util.concurrent.Callable; import java.util.concurrent.Delayed; +import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.ScheduledExecutorService; @@ -354,7 +355,7 @@ public ExecutorService executor(String name) { } @Override - public ScheduledCancellable schedule(Runnable command, TimeValue delay, String executor) { + public ScheduledCancellable schedule(Runnable command, TimeValue delay, Executor executor) { final int NOT_STARTED = 0; final int STARTED = 1; final int CANCELLED = 2; @@ -399,11 +400,6 @@ public boolean isCancelled() { }; } - @Override - public Cancellable scheduleWithFixedDelay(Runnable command, TimeValue interval, String executor) { - return super.scheduleWithFixedDelay(command, interval, executor); - } - @Override public void shutdown() { throw new UnsupportedOperationException(); diff --git a/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java b/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java index 9aea805b55b30..42fb21d13739f 100644 --- a/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java +++ b/test/framework/src/main/java/org/elasticsearch/gateway/MockGatewayMetaState.java @@ -8,13 +8,14 @@ package org.elasticsearch.gateway; -import org.elasticsearch.TransportVersion; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.IndexMetadataVerifier; import org.elasticsearch.cluster.metadata.Manifest; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.cluster.version.CompatibilityVersions; +import org.elasticsearch.cluster.version.CompatibilityVersionsUtils; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Tuple; @@ -50,9 +51,14 @@ Metadata upgradeMetadataForNode(Metadata metadata, IndexMetadataVerifier indexMe } @Override - ClusterState prepareInitialClusterState(TransportService transportService, ClusterService clusterService, ClusterState clusterState) { + ClusterState prepareInitialClusterState( + TransportService transportService, + ClusterService clusterService, + ClusterState clusterState, + CompatibilityVersions compatibilityVersions + ) { // Just set localNode here, not to mess with ClusterService and IndicesService mocking - return ClusterStateUpdaters.setLocalNode(clusterState, localNode, TransportVersion.current()); + return ClusterStateUpdaters.setLocalNode(clusterState, localNode, compatibilityVersions); } public void start(Settings settings, NodeEnvironment 
nodeEnvironment, NamedXContentRegistry xContentRegistry) { @@ -81,7 +87,8 @@ public void start(Settings settings, NodeEnvironment nodeEnvironment, NamedXCont new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L ), - List.of() + List.of(), + CompatibilityVersionsUtils.staticCurrent() ); } } diff --git a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java index c45e08c857f48..bc58a792cefc6 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/mapper/MapperTestCase.java @@ -441,6 +441,10 @@ protected String[] getParseMinimalWarnings() { return Strings.EMPTY_ARRAY; } + protected String[] getParseMinimalWarnings(IndexVersion indexVersion) { + return getParseMinimalWarnings(); + } + protected String[] getParseMaximalWarnings() { // Most mappers don't emit any warnings return Strings.EMPTY_ARRAY; @@ -494,24 +498,26 @@ public final void testMeta() throws IOException { ); } - public final void testDeprecatedBoost() throws IOException { + public final void testDeprecatedBoostWarning() throws IOException { try { createMapperService(DEPRECATED_BOOST_INDEX_VERSION, fieldMapping(b -> { minimalMapping(b, DEPRECATED_BOOST_INDEX_VERSION); b.field("boost", 2.0); })); String[] warnings = Strings.concatStringArrays( - getParseMinimalWarnings(), + getParseMinimalWarnings(DEPRECATED_BOOST_INDEX_VERSION), new String[] { "Parameter [boost] on field [field] is deprecated and has no effect" } ); assertWarnings(warnings); } catch (MapperParsingException e) { assertThat(e.getMessage(), anyOf(containsString("Unknown parameter [boost]"), containsString("[boost : 2.0]"))); } + } + public void testBoostNotAllowed() throws IOException { MapperParsingException e = expectThrows( MapperParsingException.class, - () -> createMapperService(IndexVersion.V_8_0_0, fieldMapping(b -> { + () -> createMapperService(boostNotAllowedIndexVersion(), fieldMapping(b -> { minimalMapping(b); b.field("boost", 2.0); })) @@ -521,6 +527,10 @@ public final void testDeprecatedBoost() throws IOException { assertParseMinimalWarnings(); } + protected IndexVersion boostNotAllowedIndexVersion() { + return IndexVersion.V_8_0_0; + } + /** * Use a {@linkplain ValueFetcher} to extract values from doc values. */ diff --git a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java index 4ed2b6b1eb808..5a6d8bb878af8 100644 --- a/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/index/shard/IndexShardTestCase.java @@ -223,12 +223,32 @@ protected IndexShard newShard( Settings settings, EngineFactory engineFactory, final IndexingOperationListener... listeners + ) throws IOException { + return newShard(primary, new ShardId("index", "_na_", 0), settings, engineFactory, listeners); + } + + /** + * Creates a new initializing shard. The shard will have its own unique data path. 
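The CompatibilityVersionsUtils helper introduced above keeps this version plumbing to one line at call sites such as MockGatewayMetaState. A short usage sketch; the assertions restate what the javadoc promises (staticCurrent() wraps TransportVersion.current(), staticRandom() picks a known, hence no-newer-than-current, version), and run only with -ea:

```java
import org.elasticsearch.TransportVersion;
import org.elasticsearch.cluster.version.CompatibilityVersions;
import org.elasticsearch.cluster.version.CompatibilityVersionsUtils;

public class CompatibilityVersionsSketch {
    public static void main(String[] args) {
        // Fixed, compile-time-known versions: what most tests want.
        CompatibilityVersions current = CompatibilityVersionsUtils.staticCurrent();
        assert current.transportVersion().equals(TransportVersion.current());

        // Randomized variant for tests that want coverage across valid versions.
        CompatibilityVersions random = CompatibilityVersionsUtils.staticRandom();
        assert random.transportVersion().compareTo(TransportVersion.current()) <= 0;
    }
}
```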
+ * + * @param primary indicates whether to a primary shard (ready to recover from an empty store) or a replica (ready to recover from + * another shard) + * @param shardId the shard ID for this shard + * @param settings the settings to use for this shard + * @param engineFactory the engine factory to use for this shard + * @param listeners the indexing operation listeners to add + */ + protected IndexShard newShard( + boolean primary, + ShardId shardId, + Settings settings, + EngineFactory engineFactory, + final IndexingOperationListener... listeners ) throws IOException { final RecoverySource recoverySource = primary ? RecoverySource.EmptyStoreRecoverySource.INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE; final ShardRouting shardRouting = TestShardRouting.newShardRouting( - new ShardId("index", "_na_", 0), + shardId, randomAlphaOfLength(10), primary, ShardRoutingState.INITIALIZING, @@ -482,7 +502,7 @@ protected IndexShard newShard( xContentRegistry(), createTempDir(), indexSettings.getSettings(), - "index" + routing.getIndexName() ); mapperService.merge(indexMetadata, MapperService.MergeReason.MAPPING_RECOVERY); SimilarityService similarityService = new SimilarityService(indexSettings, null, Collections.emptyMap()); @@ -712,7 +732,10 @@ public static void updateRoutingEntry(IndexShard shard, ShardRouting shardRoutin protected void recoveryEmptyReplica(IndexShard replica, boolean startReplica) throws IOException { IndexShard primary = null; try { - primary = newStartedShard(true); + primary = newStartedShard( + p -> newShard(p, replica.routingEntry().shardId(), replica.indexSettings.getSettings(), new InternalEngineFactory()), + true + ); recoverReplica(replica, primary, startReplica); } finally { closeShards(primary); diff --git a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java index 7ee3b146fccff..e97d21ae53697 100644 --- a/test/framework/src/main/java/org/elasticsearch/node/MockNode.java +++ b/test/framework/src/main/java/org/elasticsearch/node/MockNode.java @@ -30,6 +30,8 @@ import org.elasticsearch.indices.recovery.RecoverySettings; import org.elasticsearch.plugins.MockPluginsService; import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.readiness.MockReadinessService; +import org.elasticsearch.readiness.ReadinessService; import org.elasticsearch.script.MockScriptService; import org.elasticsearch.script.ScriptContext; import org.elasticsearch.script.ScriptEngine; @@ -184,6 +186,14 @@ protected ScriptService newScriptService( return new MockScriptService(settings, engines, contexts); } + @Override + protected ReadinessService newReadinessService(ClusterService clusterService, Environment environment) { + if (getPluginsService().filterPlugins(MockReadinessService.TestPlugin.class).isEmpty()) { + return super.newReadinessService(clusterService, environment); + } + return new MockReadinessService(clusterService, environment); + } + @Override protected TransportService newTransportService( Settings settings, diff --git a/test/framework/src/main/java/org/elasticsearch/readiness/MockReadinessService.java b/test/framework/src/main/java/org/elasticsearch/readiness/MockReadinessService.java new file mode 100644 index 0000000000000..6b58a6ce36117 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/readiness/MockReadinessService.java @@ -0,0 +1,109 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.readiness; + +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.env.Environment; +import org.elasticsearch.node.MockNode; +import org.elasticsearch.plugins.Plugin; + +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.net.SocketAddress; +import java.net.SocketOption; +import java.nio.channels.ServerSocketChannel; +import java.nio.channels.SocketChannel; +import java.nio.channels.spi.SelectorProvider; +import java.util.Set; + +public class MockReadinessService extends ReadinessService { + /** + * Marker plugin used by {@link MockNode} to enable {@link MockReadinessService}. + */ + public static class TestPlugin extends Plugin {} + + private static final String METHOD_NOT_MOCKED = "This method has not been mocked"; + + private static class MockServerSocketChannel extends ServerSocketChannel { + + static ServerSocketChannel openMock() { + return new MockServerSocketChannel(); + } + + private MockServerSocketChannel() { + super(SelectorProvider.provider()); + } + + @Override + public ServerSocketChannel bind(SocketAddress local, int backlog) { + assert isOpen(); + return this; + } + + @Override + public ServerSocketChannel setOption(SocketOption name, T value) { + throw new UnsupportedOperationException(METHOD_NOT_MOCKED); + } + + @Override + public T getOption(SocketOption name) { + throw new UnsupportedOperationException(METHOD_NOT_MOCKED); + } + + @Override + public Set> supportedOptions() { + throw new UnsupportedOperationException(METHOD_NOT_MOCKED); + } + + @Override + public ServerSocket socket() { + throw new UnsupportedOperationException(METHOD_NOT_MOCKED); + } + + @Override + public SocketChannel accept() { + return null; + } + + @Override + public SocketAddress getLocalAddress() { + return new InetSocketAddress(InetAddress.getLoopbackAddress(), 0); + } + + @Override + protected void implCloseSelectableChannel() {} + + @Override + protected void implConfigureBlocking(boolean block) { + throw new UnsupportedOperationException(METHOD_NOT_MOCKED); + } + } + + public MockReadinessService(ClusterService clusterService, Environment environment) { + super(clusterService, environment, MockServerSocketChannel::openMock); + } + + static void tcpReadinessProbeTrue(ReadinessService readinessService) { + ServerSocketChannel mockedSocket = readinessService.serverChannel(); + if (mockedSocket == null) { + throw new AssertionError("Mocked socket not created for this node"); + } + if (mockedSocket.isOpen() == false) { + throw new AssertionError("Readiness socket should be open"); + } + } + + static void tcpReadinessProbeFalse(ReadinessService readinessService) { + ServerSocketChannel mockedSocket = readinessService.serverChannel(); + if (mockedSocket != null && mockedSocket.isOpen()) { + throw new AssertionError("Readiness socket should be closed"); + } + } +} diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java index 86ec7c77d14e5..d1b211128c108 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/ESBlobStoreRepositoryIntegTestCase.java @@ -40,6 +40,7 @@ import java.io.IOException; import java.io.InputStream; import java.nio.file.NoSuchFileException; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; @@ -53,6 +54,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; @@ -481,6 +483,39 @@ public void testIndicesDeletedFromRepository() throws Exception { assertAcked(clusterAdmin().prepareDeleteSnapshot(repoName, "test-snap2").get()); } + public void testBlobStoreBulkDeletion() throws Exception { + Map> expectedBlobsPerContainer = new HashMap<>(); + try (BlobStore store = newBlobStore()) { + List blobsToDelete = new ArrayList<>(); + int numberOfContainers = randomIntBetween(2, 5); + for (int i = 0; i < numberOfContainers; i++) { + BlobPath containerPath = BlobPath.EMPTY.add(randomIdentifier()); + final BlobContainer container = store.blobContainer(containerPath); + int numberOfBlobsPerContainer = randomIntBetween(5, 10); + for (int j = 0; j < numberOfBlobsPerContainer; j++) { + byte[] bytes = randomBytes(randomInt(100)); + String blobName = randomAlphaOfLength(10); + container.writeBlob(blobName, new BytesArray(bytes), false); + if (randomBoolean()) { + blobsToDelete.add(containerPath.buildAsString() + blobName); + } else { + expectedBlobsPerContainer.computeIfAbsent(containerPath, unused -> new ArrayList<>()).add(blobName); + } + } + } + + store.deleteBlobsIgnoringIfNotExists(blobsToDelete.iterator()); + for (var containerEntry : expectedBlobsPerContainer.entrySet()) { + BlobContainer blobContainer = store.blobContainer(containerEntry.getKey()); + Map blobsInContainer = blobContainer.listBlobs(); + for (String expectedBlob : containerEntry.getValue()) { + assertThat(blobsInContainer, hasKey(expectedBlob)); + } + blobContainer.delete(); + } + } + } + protected void addRandomDocuments(String name, int numDocs) throws InterruptedException { IndexRequestBuilder[] indexRequestBuilders = new IndexRequestBuilder[numDocs]; for (int i = 0; i < numDocs; i++) { diff --git a/test/framework/src/main/java/org/elasticsearch/search/DummyQueryBuilder.java b/test/framework/src/main/java/org/elasticsearch/search/DummyQueryBuilder.java index b936feebd4901..bda5fe825b001 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/DummyQueryBuilder.java +++ b/test/framework/src/main/java/org/elasticsearch/search/DummyQueryBuilder.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.AbstractQueryBuilder; @@ -67,6 +68,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java 
b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java index eee0c1b05cdc8..9eb9a76d1e1ed 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/search/aggregations/AggregatorTestCase.java @@ -43,6 +43,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.common.CheckedBiConsumer; import org.elasticsearch.common.TriConsumer; @@ -586,8 +587,8 @@ private A searchAndReduce( } else { Weight weight = subSearcher.createWeight(rewritten, ScoreMode.COMPLETE, 1f); subSearcher.search(weight, a.asCollector()); + a.postCollection(); } - a.postCollection(); assertEquals(shouldBeCached, context.isCacheable()); internalAggs.add(a.buildTopLevel()); } finally { @@ -612,7 +613,6 @@ private A searchAndReduce( root.preCollection(); aggregators.add(root); new TimeSeriesIndexSearcher(searcher, List.of()).search(rewritten, MultiBucketCollector.wrap(true, List.of(root))); - root.postCollection(); } else { CollectorManager collectorManager = new CollectorManager<>() { @Override @@ -866,7 +866,7 @@ protected void withAggregator( AggregationBuilder aggregationBuilder, Query query, CheckedConsumer buildIndex, - CheckedBiConsumer verify, + CheckedBiConsumer verify, MappedFieldType... fieldTypes ) throws IOException { try (Directory directory = newDirectory()) { @@ -878,9 +878,8 @@ protected void withAggregator( DirectoryReader unwrapped = DirectoryReader.open(directory); DirectoryReader indexReader = wrapDirectoryReader(unwrapped) ) { - IndexSearcher searcher = newIndexSearcher(indexReader); - try (AggregationContext context = createAggregationContext(searcher, query, fieldTypes)) { - verify.accept(searcher, createAggregator(aggregationBuilder, context)); + try (AggregationContext context = createAggregationContext(indexReader, query, fieldTypes)) { + verify.accept(indexReader, createAggregator(aggregationBuilder, context)); } } } @@ -1417,7 +1416,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankBuilder.java b/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankBuilder.java index 411a226d08951..71a943413ba2d 100644 --- a/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankBuilder.java +++ b/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankBuilder.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ConstructingObjectParser; @@ -58,7 +59,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_8_0; + return TransportVersions.V_8_8_0; } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankShardResult.java b/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankShardResult.java index c43a16cd218e3..ab66d021497d5 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankShardResult.java +++ b/test/framework/src/main/java/org/elasticsearch/search/rank/TestRankShardResult.java @@ -9,6 +9,7 @@ package org.elasticsearch.search.rank; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -33,7 +34,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_8_0; + return TransportVersions.V_8_8_0; } @Override diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java b/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java index b4aae75b2c8fe..926f9dc2b2a8a 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/BlobStoreWrapper.java @@ -12,6 +12,7 @@ import org.elasticsearch.common.blobstore.BlobStore; import java.io.IOException; +import java.util.Iterator; public class BlobStoreWrapper implements BlobStore { @@ -26,6 +27,11 @@ public BlobContainer blobContainer(BlobPath path) { return delegate.blobContainer(path); } + @Override + public void deleteBlobsIgnoringIfNotExists(Iterator blobNames) throws IOException { + delegate.deleteBlobsIgnoringIfNotExists(blobNames); + } + @Override public void close() throws IOException { delegate.close(); diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 0f3588eee4de6..7711164eebf75 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -24,9 +24,11 @@ import org.elasticsearch.action.ActionFuture; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteResponse; +import org.elasticsearch.action.admin.cluster.allocation.ClusterAllocationExplainResponse; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse; import org.elasticsearch.action.admin.cluster.node.hotthreads.NodeHotThreads; +import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsResponse; import org.elasticsearch.action.admin.cluster.node.info.NodeInfo; import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; @@ -53,6 +55,8 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException; import org.elasticsearch.action.support.DestructiveOperations; import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.action.support.RefCountingListener; import org.elasticsearch.client.RestClient; import org.elasticsearch.client.RestClientBuilder; import org.elasticsearch.client.internal.AdminClient; @@ -174,8 +178,10 @@ import java.util.concurrent.Callable; import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import 
java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -960,32 +966,54 @@ private ClusterHealthStatus ensureColor( // been removed by the master so that the health check applies to the set of nodes we expect to be part of the cluster. .waitForNodes(Integer.toString(cluster().size())); - ClusterHealthResponse actionGet = clusterAdmin().health(healthRequest).actionGet(); - if (actionGet.isTimedOut()) { - final String hotThreads = clusterAdmin().prepareNodesHotThreads() - .setThreads(99999) - .setIgnoreIdleThreads(false) - .get() - .getNodes() - .stream() - .map(NodeHotThreads::getHotThreads) - .collect(Collectors.joining("\n")); + final ClusterHealthResponse clusterHealthResponse = clusterAdmin().health(healthRequest).actionGet(); + if (clusterHealthResponse.isTimedOut()) { + final var allocationExplainRef = new AtomicReference(); + final var clusterStateRef = new AtomicReference(); + final var pendingTasksRef = new AtomicReference(); + final var hotThreadsRef = new AtomicReference(); + + final var detailsFuture = new PlainActionFuture(); + try (var listeners = new RefCountingListener(detailsFuture)) { + clusterAdmin().prepareAllocationExplain().execute(listeners.acquire(allocationExplainRef::set)); + clusterAdmin().prepareState().execute(listeners.acquire(clusterStateRef::set)); + clusterAdmin().preparePendingClusterTasks().execute(listeners.acquire(pendingTasksRef::set)); + clusterAdmin().prepareNodesHotThreads() + .setThreads(9999) + .setIgnoreIdleThreads(false) + .execute(listeners.acquire(hotThreadsRef::set)); + } + + try { + detailsFuture.get(60, TimeUnit.SECONDS); + } catch (Exception e) { + logger.error("failed to get full debug details within 60s timeout", e); + } + logger.info( - "{} timed out, cluster state:\n{}\npending tasks:\n{}\nhot threads:\n{}\n", + "{} timed out\nallocation explain:\n{}\ncluster state:\n{}\npending tasks:\n{}\nhot threads:\n{}\n", method, - clusterAdmin().prepareState().get().getState(), - clusterAdmin().preparePendingClusterTasks().get(), - hotThreads + safeFormat(allocationExplainRef.get(), r -> Strings.toString(r.getExplanation(), true, true)), + safeFormat(clusterStateRef.get(), r -> r.getState().toString()), + safeFormat(pendingTasksRef.get(), r -> Strings.toString(r, true, true)), + safeFormat( + hotThreadsRef.get(), + r -> r.getNodes().stream().map(NodeHotThreads::getHotThreads).collect(Collectors.joining("\n")) + ) ); fail("timed out waiting for " + color + " state"); } assertThat( - "Expected at least " + clusterHealthStatus + " but got " + actionGet.getStatus(), - actionGet.getStatus().value(), + "Expected at least " + clusterHealthStatus + " but got " + clusterHealthResponse.getStatus(), + clusterHealthResponse.getStatus().value(), lessThanOrEqualTo(clusterHealthStatus.value()) ); logger.debug("indices {} are {}", indices.length == 0 ? "[_all]" : indices, color); - return actionGet.getStatus(); + return clusterHealthResponse.getStatus(); + } + + private static String safeFormat(@Nullable T value, Function formatter) { + return value == null ? 
null : formatter.apply(value); } /** @@ -2047,8 +2075,9 @@ private NodeConfigurationSource getNodeConfigSource() { if (addMockTransportService()) { initialNodeSettings.put(NetworkModule.TRANSPORT_TYPE_KEY, getTestTransportType()); } - boolean eagerConcurrentSearch = eagerConcurrentSearch(); - if (eagerConcurrentSearch) { + boolean enableConcurrentSearch = enableConcurrentSearch(); + if (enableConcurrentSearch) { + initialNodeSettings.put(SearchService.QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey(), true); initialNodeSettings.put(SearchService.MINIMUM_DOCS_PER_SLICE.getKey(), 1); } return new NodeConfigurationSource() { @@ -2067,7 +2096,7 @@ public Path nodeConfigPath(int nodeOrdinal) { @Override public Collection> nodePlugins() { - if (eagerConcurrentSearch) { + if (enableConcurrentSearch) { List> plugins = new ArrayList<>(ESIntegTestCase.this.nodePlugins()); plugins.add(ConcurrentSearchTestPlugin.class); return plugins; @@ -2086,11 +2115,11 @@ protected boolean addMockTransportService() { } /** - * Whether we'd like to increase the likelihood of leveraging inter-segment search concurrency, by creating multiple slices - * with a low amount of documents in them, which would not be allowed in production. + * Whether we'd like to enable inter-segment search concurrency and increase the likelihood of leveraging it, by creating multiple + * slices with a low amount of documents in them, which would not be allowed in production. * Default is true, can be disabled if it causes problems in specific tests. */ - protected boolean eagerConcurrentSearch() { + protected boolean enableConcurrentSearch() { return true; } @@ -2174,7 +2203,7 @@ public List getTransportInterceptors( @Override public TransportRequestHandler interceptHandler( String action, - String executor, + Executor executor, boolean forceExecution, TransportRequestHandler actualHandler ) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java index 2b13c9b67d345..76ed45e2bbbe5 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java @@ -227,11 +227,7 @@ protected List filteredWarnings() { private Node newNode() { final Path tempDir = createTempDir(); final String nodeName = nodeSettings().get(Node.NODE_NAME_SETTING.getKey(), "node_s_0"); - boolean eagerConcurrentSearch = eagerConcurrentSearch(); - Settings concurrentSetting = eagerConcurrentSearch - ? 
Settings.builder().put(SearchService.MINIMUM_DOCS_PER_SLICE.getKey(), 1).build() - : Settings.EMPTY; - Settings settings = Settings.builder() + Settings.Builder settingBuilder = Settings.builder() .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName("single-node-cluster", random().nextLong())) .put(DestructiveOperations.REQUIRES_NAME_SETTING.getKey(), false) .put(Environment.PATH_HOME_SETTING.getKey(), tempDir) @@ -254,9 +250,14 @@ private Node newNode() { .put(HierarchyCircuitBreakerService.USE_REAL_MEMORY_USAGE_SETTING.getKey(), false) .putList(DISCOVERY_SEED_HOSTS_SETTING.getKey()) // empty list disables a port scan for other nodes .putList(INITIAL_MASTER_NODES_SETTING.getKey(), nodeName) - .put(nodeSettings()) // allow test cases to provide their own settings or override these - .put(concurrentSetting) - .build(); + .put(nodeSettings());// allow test cases to provide their own settings or override these + + boolean enableConcurrentSearch = enableConcurrentSearch(); + if (enableConcurrentSearch) { + settingBuilder.put(SearchService.QUERY_PHASE_PARALLEL_COLLECTION_ENABLED.getKey(), true) + .put(SearchService.MINIMUM_DOCS_PER_SLICE.getKey(), 1); + } + Settings settings = settingBuilder.build(); Collection> plugins = new ArrayList<>(getPlugins()); if (plugins.contains(getTestTransportPlugin()) == false) { @@ -265,7 +266,7 @@ private Node newNode() { if (addMockHttpTransport()) { plugins.add(MockHttpTransport.TestPlugin.class); } - if (eagerConcurrentSearch) { + if (enableConcurrentSearch) { plugins.add(ConcurrentSearchTestPlugin.class); } plugins.add(MockScriptService.TestPlugin.class); @@ -445,11 +446,11 @@ protected void ensureNoInitializingShards() { } /** - * Whether we'd like to increase the likelihood of leveraging inter-segment search concurrency, by creating multiple slices - * with a low amount of documents in them, which would not be allowed in production. + * Whether we'd like to enable inter-segment search concurrency and increase the likelihood of leveraging it, by creating multiple + * slices with a low amount of documents in them, which would not be allowed in production. * Default is true, can be disabled if it causes problems in specific tests. 
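The rename from eagerConcurrentSearch to enableConcurrentSearch tracks a behavior change visible in this hunk: the override no longer just shrinks slices via MINIMUM_DOCS_PER_SLICE, it now also switches query-phase parallel collection on. A sketch of a suite opting back out; the class name is hypothetical:

```java
import org.elasticsearch.test.ESSingleNodeTestCase;

// Hypothetical test suite that needs production-like, single-threaded collection.
public class MyNonConcurrentSearchTests extends ESSingleNodeTestCase {

    @Override
    protected boolean enableConcurrentSearch() {
        // Skips QUERY_PHASE_PARALLEL_COLLECTION_ENABLED=true and MINIMUM_DOCS_PER_SLICE=1,
        // which the base class otherwise applies to force inter-segment concurrency.
        return false;
    }
}
```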
*/ - protected boolean eagerConcurrentSearch() { + protected boolean enableConcurrentSearch() { return true; } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java index 8e45fb943d916..49f5ab1e8c6fa 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESTestCase.java @@ -1522,7 +1522,7 @@ public static T copyWriteable( Writeable.Reader reader, TransportVersion version ) throws IOException { - return copyInstance(original, namedWriteableRegistry, (out, value) -> value.writeTo(out), reader, version); + return copyInstance(original, namedWriteableRegistry, StreamOutput::writeWriteable, reader, version); } /** diff --git a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java index 8b579666fd767..008f8511ee127 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/InternalAggregationTestCase.java @@ -11,6 +11,7 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.SetOnce; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -731,7 +732,7 @@ public BucketCardinality bucketCardinality() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/TransportVersionUtils.java b/test/framework/src/main/java/org/elasticsearch/test/TransportVersionUtils.java index 93e43f2266a0e..689229e354d96 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/TransportVersionUtils.java +++ b/test/framework/src/main/java/org/elasticsearch/test/TransportVersionUtils.java @@ -9,6 +9,7 @@ package org.elasticsearch.test; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.core.Nullable; import java.util.Collections; @@ -120,6 +121,6 @@ public static TransportVersion getNextVersion(TransportVersion version, boolean /** Returns a random {@code TransportVersion} that is compatible with {@link TransportVersion#current()} */ public static TransportVersion randomCompatibleVersion(Random random) { - return randomVersionBetween(random, TransportVersion.MINIMUM_COMPATIBLE, TransportVersion.current()); + return randomVersionBetween(random, TransportVersions.MINIMUM_COMPATIBLE, TransportVersion.current()); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java index d46ec2e164acf..43cb5b78c1f9f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java +++ b/test/framework/src/main/java/org/elasticsearch/test/transport/MockTransportService.java @@ -561,7 +561,7 @@ protected void doRun() throws IOException { runnable.run(); } else { requestsToSendWhenCleared.add(runnable); - threadPool.schedule(runnable, delay, ThreadPool.Names.GENERIC); + threadPool.schedule(runnable, delay, threadPool.generic()); } } } diff --git 
a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java index 6ffe78ba493a2..4a683c3f7dd57 100644 --- a/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/transport/AbstractSimpleTransportTestCase.java @@ -15,6 +15,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; @@ -297,7 +298,7 @@ public void tearDown() throws Exception { } } - public void assertNumHandshakes(long expected, Transport transport) { + public static void assertNumHandshakes(long expected, Transport transport) { if (transport instanceof TcpTransport) { assertEquals(expected, ((TcpTransport) transport).getNumHandshakes()); } @@ -2260,7 +2261,7 @@ public void testRegisterHandlerTwice() { public void testHandshakeWithIncompatVersion() { assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport); - TransportVersion transportVersion = TransportVersion.fromId(TransportVersion.MINIMUM_COMPATIBLE.id() - 1); + TransportVersion transportVersion = TransportVersion.fromId(TransportVersions.MINIMUM_COMPATIBLE.id() - 1); try ( MockTransportService service = buildService( "TS_C", @@ -2292,7 +2293,7 @@ public void testHandshakeUpdatesVersion() throws IOException { assumeTrue("only tcp transport has a handshake method", serviceA.getOriginalTransport() instanceof TcpTransport); TransportVersion transportVersion = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.MINIMUM_COMPATIBLE, + TransportVersions.MINIMUM_COMPATIBLE, TransportVersion.current() ); try ( diff --git a/test/framework/src/main/java/org/elasticsearch/transport/TestOutboundRequestMessage.java b/test/framework/src/main/java/org/elasticsearch/transport/TestOutboundRequestMessage.java new file mode 100644 index 0000000000000..bdef107cec4e4 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/transport/TestOutboundRequestMessage.java @@ -0,0 +1,37 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. 
+ */ + +package org.elasticsearch.transport; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.RecyclerBytesStreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.util.concurrent.ThreadContext; + +import java.io.IOException; + +public class TestOutboundRequestMessage extends OutboundMessage.Request { + public TestOutboundRequestMessage( + ThreadContext threadContext, + Writeable message, + TransportVersion version, + String action, + long requestId, + boolean isHandshake, + Compression.Scheme compressionScheme + ) { + super(threadContext, message, version, action, requestId, isHandshake, compressionScheme); + + } + + @Override + public BytesReference serialize(RecyclerBytesStreamOutput bytesStream) throws IOException { + return super.serialize(bytesStream); + } +} diff --git a/test/framework/src/test/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueueTests.java b/test/framework/src/test/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueueTests.java index 73e20520165a9..fb90b6b87f219 100644 --- a/test/framework/src/test/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueueTests.java +++ b/test/framework/src/test/java/org/elasticsearch/common/util/concurrent/DeterministicTaskQueueTests.java @@ -19,7 +19,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import static org.elasticsearch.threadpool.ThreadPool.Names.GENERIC; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.empty; @@ -273,14 +272,14 @@ public void testThreadPoolSchedulesFutureTasks() { final ThreadPool threadPool = taskQueue.getThreadPool(); final long delayMillis = randomLongBetween(1, 100); - threadPool.schedule(() -> strings.add("deferred"), TimeValue.timeValueMillis(delayMillis), GENERIC); + threadPool.schedule(() -> strings.add("deferred"), TimeValue.timeValueMillis(delayMillis), threadPool.generic()); assertFalse(taskQueue.hasRunnableTasks()); assertTrue(taskQueue.hasDeferredTasks()); - threadPool.schedule(() -> strings.add("runnable"), TimeValue.ZERO, GENERIC); + threadPool.schedule(() -> strings.add("runnable"), TimeValue.ZERO, threadPool.generic()); assertTrue(taskQueue.hasRunnableTasks()); - threadPool.schedule(() -> strings.add("also runnable"), TimeValue.MINUS_ONE, GENERIC); + threadPool.schedule(() -> strings.add("also runnable"), TimeValue.MINUS_ONE, threadPool.generic()); taskQueue.runAllTasks(); @@ -290,8 +289,8 @@ public void testThreadPoolSchedulesFutureTasks() { final long delayMillis1 = randomLongBetween(2, 100); final long delayMillis2 = randomLongBetween(1, delayMillis1 - 1); - threadPool.schedule(() -> strings.add("further deferred"), TimeValue.timeValueMillis(delayMillis1), GENERIC); - threadPool.schedule(() -> strings.add("not quite so deferred"), TimeValue.timeValueMillis(delayMillis2), GENERIC); + threadPool.schedule(() -> strings.add("further deferred"), TimeValue.timeValueMillis(delayMillis1), threadPool.generic()); + threadPool.schedule(() -> strings.add("not quite so deferred"), TimeValue.timeValueMillis(delayMillis2), threadPool.generic()); assertFalse(taskQueue.hasRunnableTasks()); assertTrue(taskQueue.hasDeferredTasks()); @@ -303,7 +302,7 @@ public void testThreadPoolSchedulesFutureTasks() { final Scheduler.Cancellable cancelledBeforeExecution = threadPool.schedule( () 
-> strings.add("cancelled before execution"), cancelledDelay, - "" + threadPool.generic() ); cancelledBeforeExecution.cancel(); @@ -372,7 +371,7 @@ public void testThreadPoolSchedulesPeriodicFutureTasks() { Scheduler.Cancellable cancellable = threadPool.scheduleWithFixedDelay( () -> strings.add("periodic-" + counter.getAndIncrement()), TimeValue.timeValueMillis(intervalMillis), - GENERIC + threadPool.generic() ); assertFalse(taskQueue.hasRunnableTasks()); assertTrue(taskQueue.hasDeferredTasks()); diff --git a/test/framework/src/test/java/org/elasticsearch/search/internal/ConcurrentSearchSingleNodeTests.java b/test/framework/src/test/java/org/elasticsearch/search/internal/ConcurrentSearchSingleNodeTests.java index b1be7bb3f1a4f..5bb393ff70e83 100644 --- a/test/framework/src/test/java/org/elasticsearch/search/internal/ConcurrentSearchSingleNodeTests.java +++ b/test/framework/src/test/java/org/elasticsearch/search/internal/ConcurrentSearchSingleNodeTests.java @@ -19,9 +19,9 @@ public class ConcurrentSearchSingleNodeTests extends ESSingleNodeTestCase { - private final boolean eagerConcurrentSearch = randomBoolean(); + private final boolean concurrentSearch = randomBoolean(); - public void testEagerConcurrentSearch() throws IOException { + public void testConcurrentSearch() throws IOException { client().admin().indices().prepareCreate("index").get(); IndicesService indicesService = getInstanceFromNode(IndicesService.class); IndexService indexService = indicesService.iterator().next(); @@ -30,7 +30,7 @@ public void testEagerConcurrentSearch() throws IOException { ShardSearchRequest shardSearchRequest = new ShardSearchRequest(shard.shardId(), 0L, AliasFilter.EMPTY); try (SearchContext searchContext = searchService.createSearchContext(shardSearchRequest, TimeValue.MINUS_ONE)) { ContextIndexSearcher searcher = searchContext.searcher(); - if (eagerConcurrentSearch) { + if (concurrentSearch) { assertEquals(1, searcher.getMinimumDocsPerSlice()); } else { assertEquals(50_000, searcher.getMinimumDocsPerSlice()); @@ -39,7 +39,7 @@ public void testEagerConcurrentSearch() throws IOException { } @Override - protected boolean eagerConcurrentSearch() { - return eagerConcurrentSearch; + protected boolean enableConcurrentSearch() { + return concurrentSearch; } } diff --git a/test/framework/src/test/java/org/elasticsearch/search/internal/ConcurrentSearchTestPluginTests.java b/test/framework/src/test/java/org/elasticsearch/search/internal/ConcurrentSearchTestPluginTests.java index 3b27aa3b51138..29da297ce292e 100644 --- a/test/framework/src/test/java/org/elasticsearch/search/internal/ConcurrentSearchTestPluginTests.java +++ b/test/framework/src/test/java/org/elasticsearch/search/internal/ConcurrentSearchTestPluginTests.java @@ -20,9 +20,9 @@ @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 1) public class ConcurrentSearchTestPluginTests extends ESIntegTestCase { - private final boolean eagerConcurrentSearch = randomBoolean(); + private final boolean concurrentSearch = randomBoolean(); - public void testEagerConcurrentSearch() throws IOException { + public void testConcurrentSearch() throws IOException { client().admin().indices().prepareCreate("index").get(); IndicesService indicesService = internalCluster().getDataNodeInstance(IndicesService.class); IndexService indexService = indicesService.iterator().next(); @@ -31,7 +31,7 @@ public void testEagerConcurrentSearch() throws IOException { ShardSearchRequest shardSearchRequest = new ShardSearchRequest(shard.shardId(), 0L, 
AliasFilter.EMPTY); try (SearchContext searchContext = searchService.createSearchContext(shardSearchRequest, TimeValue.MINUS_ONE)) { ContextIndexSearcher searcher = searchContext.searcher(); - if (eagerConcurrentSearch) { + if (concurrentSearch) { assertEquals(1, searcher.getMinimumDocsPerSlice()); } else { assertEquals(50_000, searcher.getMinimumDocsPerSlice()); @@ -40,7 +40,7 @@ public void testEagerConcurrentSearch() throws IOException { } @Override - protected boolean eagerConcurrentSearch() { - return eagerConcurrentSearch; + protected boolean enableConcurrentSearch() { + return concurrentSearch; } } diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/LogType.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/LogType.java index 1265b0386cdb2..96178e621e018 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/LogType.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/LogType.java @@ -10,7 +10,8 @@ public enum LogType { SERVER("%s.log"), - SERVER_JSON("%s_server.json"); + SERVER_JSON("%s_server.json"), + AUDIT("%s_audit.json"); private final String filenameFormat; diff --git a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java index e3d68ab8178bb..c897a0026c6c7 100644 --- a/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java +++ b/test/test-clusters/src/main/java/org/elasticsearch/test/cluster/local/AbstractLocalClusterFactory.java @@ -418,7 +418,18 @@ private void writeConfiguration() { } private void copyExtraConfigFiles() { - spec.getExtraConfigFiles().forEach((fileName, resource) -> resource.writeTo(configDir.resolve(fileName))); + spec.getExtraConfigFiles().forEach((fileName, resource) -> { + final Path target = configDir.resolve(fileName); + final Path directory = target.getParent(); + if (Files.exists(directory) == false) { + try { + Files.createDirectories(directory); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + resource.writeTo(target); + }); } private void createKeystore() { diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java index 33681f474a07b..da6786c8341cc 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java +++ b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/ESClientYamlSuiteTestCase.java @@ -490,7 +490,7 @@ public void test() throws IOException { final XContentBuilder template = jsonBuilder(); template.startObject(); { - template.startArray("index_patterns").value("*").endArray(); + template.array("index_patterns", "*"); if (useComponentTemplate) { template.field("priority", 4); // relatively low priority, but hopefully uncommon enough not to conflict template.startObject("template"); diff --git a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/CloseToAssertion.java b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/CloseToAssertion.java index 557556abc743e..cff3deb2c1068 100644 --- a/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/CloseToAssertion.java +++ 
b/test/yaml-rest-runner/src/main/java/org/elasticsearch/test/rest/yaml/section/CloseToAssertion.java @@ -36,14 +36,14 @@ public static CloseToAssertion parse(XContentParser parser) throws IOException { throw new IllegalArgumentException("expected a map with value and error but got a map with " + map.size() + " fields"); } Object valObj = map.get("value"); - if (valObj instanceof Number == false) { - throw new IllegalArgumentException("value is missing or not a number"); + if (valObj == null) { + throw new IllegalArgumentException("value is missing"); } Object errObj = map.get("error"); if (errObj instanceof Number == false) { throw new IllegalArgumentException("error is missing or not a number"); } - return new CloseToAssertion(location, fieldValueTuple.v1(), ((Number) valObj).doubleValue(), ((Number) errObj).doubleValue()); + return new CloseToAssertion(location, fieldValueTuple.v1(), valObj, ((Number) errObj).doubleValue()); } else { throw new IllegalArgumentException( "expected a map with value and error but got " + fieldValueTuple.v2().getClass().getSimpleName() @@ -56,7 +56,7 @@ public static CloseToAssertion parse(XContentParser parser) throws IOException { private final double error; - public CloseToAssertion(XContentLocation location, String field, Double expectedValue, Double error) { + public CloseToAssertion(XContentLocation location, String field, Object expectedValue, Double error) { super(location, field, expectedValue); this.error = error; } @@ -69,9 +69,9 @@ public final double getError() { protected void doAssert(Object actualValue, Object expectedValue) { logger.trace("assert that [{}] is close to [{}] with error [{}] (field [{}])", actualValue, expectedValue, error, getField()); if (actualValue instanceof Number actualValueNumber) { - assertThat(actualValueNumber.doubleValue(), closeTo((Double) expectedValue, error)); + assertThat(actualValueNumber.doubleValue(), closeTo(((Number) expectedValue).doubleValue(), error)); } else { - throw new AssertionError("excpected a value close to " + expectedValue + " but got " + actualValue + ", which is not a number"); + throw new AssertionError("expected a value close to " + expectedValue + " but got " + actualValue + ", which is not a number"); } } } diff --git a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/AssertionTests.java b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/AssertionTests.java index 1ac0bff285b96..705f403632f62 100644 --- a/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/AssertionTests.java +++ b/test/yaml-rest-runner/src/test/java/org/elasticsearch/test/rest/yaml/section/AssertionTests.java @@ -169,7 +169,7 @@ public void testInvalidCloseTo() throws Exception { parser = createParser(YamlXContent.yamlXContent, "{ field: { foo: 13, bar: 15 } }"); exception = expectThrows(IllegalArgumentException.class, () -> CloseToAssertion.parse(parser)); - assertThat(exception.getMessage(), equalTo("value is missing or not a number")); + assertThat(exception.getMessage(), equalTo("value is missing")); } public void testExists() throws IOException { diff --git a/x-pack/docs/build.gradle b/x-pack/docs/build.gradle index 35242258d5122..7ae1ec73d32c2 100644 --- a/x-pack/docs/build.gradle +++ b/x-pack/docs/build.gradle @@ -1,5 +1,3 @@ -import org.elasticsearch.gradle.internal.info.BuildParams - apply plugin: 'elasticsearch.docs-test' apply plugin: 'elasticsearch.rest-resources' @@ -29,18 +27,6 @@ restResources { } } -// TODO: Remove the 
following when the following features are released. These tests include new privileges only available under feature flags -// which require snapshot builds: -// * Data Stream Lifecycle. manage_data_stream_lifecycle privilege is only available with dlm_feature_flag_enabled set -// We disable these tests for snapshot builds to maintain release build coverage. -tasks.named("yamlRestTest").configure { - if (BuildParams.isSnapshotBuild()) { - systemProperty 'tests.rest.blacklist', '*/get-builtin-privileges/*' - } else { - systemProperty 'tests.rest.blacklist', ['*/create-cross-cluster-api-key/*', '*/update-cross-cluster-api-key/*'].join(',') - } -} - testClusters.matching { it.name == "yamlRestTest" }.configureEach { extraConfigFile 'op-jwks.json', project(':x-pack:test:idp-fixture').file("oidc/op-jwks.json") extraConfigFile 'idp-docs-metadata.xml', project(':x-pack:test:idp-fixture').file("idp/shibboleth-idp/metadata/idp-docs-metadata.xml") @@ -725,6 +711,14 @@ tasks.named("buildRestTests").configure { buildRestTests -> "email" : "jacknich@example.com", "metadata" : { "intelligence" : 7 } } + - do: + security.activate_user_profile: + body: > + { + "grant_type": "password", + "username": "jacknich", + "password" : "l0ng-r4nd0m-p@ssw0rd" + } ''' setups['app0102_privileges'] = ''' - do: diff --git a/x-pack/docs/en/rest-api/security.asciidoc b/x-pack/docs/en/rest-api/security.asciidoc index 413fa34e97a67..d88622db7006a 100644 --- a/x-pack/docs/en/rest-api/security.asciidoc +++ b/x-pack/docs/en/rest-api/security.asciidoc @@ -69,8 +69,8 @@ without requiring basic authentication: * <> * <> -Use the following APIs to create and update cross-cluster API keys for -API key based remote cluster access: +beta:[] Use the following APIs to create and update cross-cluster API keys for +<>: * <> * <> diff --git a/x-pack/docs/en/rest-api/security/create-cross-cluster-api-key.asciidoc b/x-pack/docs/en/rest-api/security/create-cross-cluster-api-key.asciidoc index e2d9d55415893..6cb00815c0ce7 100644 --- a/x-pack/docs/en/rest-api/security/create-cross-cluster-api-key.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-cross-cluster-api-key.asciidoc @@ -8,7 +8,7 @@ beta::[] Create Cross-Cluster API key ++++ -Creates an API key of the `cross_cluster` type for the API key based remote cluster access. +Creates an API key of the `cross_cluster` type for the <> access. A `cross_cluster` API key cannot be used to authenticate through the REST interface. On the contrary, a <> is meant to be used through the REST interface and cannot be used for the API key based remote cluster access. diff --git a/x-pack/docs/en/rest-api/security/create-roles.asciidoc b/x-pack/docs/en/rest-api/security/create-roles.asciidoc index 274c8579f4357..532ea60d3e46a 100644 --- a/x-pack/docs/en/rest-api/security/create-roles.asciidoc +++ b/x-pack/docs/en/rest-api/security/create-roles.asciidoc @@ -77,9 +77,8 @@ For more information, see `remote_indices`:: beta:[] (list) A list of remote indices permissions entries. + -- -// TODO: fix the link to new page of API key based remote clusters -NOTE: Remote indices are effective for remote clusters configured with the API key based model. -They have no effect for remote clusters configured with the certificate based model. +NOTE: Remote indices are effective for <>. +They have no effect for remote clusters configured with the <>. -- `clusters` (required)::: (list) A list of cluster aliases to which the permissions in this entry apply. 
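For context, a minimal request exercising the `remote_indices` field described above might look like the following sketch (the role name, cluster alias, and index pattern are illustrative only, not part of this change):

[source,console]
--------------------------------------------------
POST /_security/role/remote_logs_reader
{
  "remote_indices": [
    {
      "clusters": [ "my_remote_cluster" ],
      "names": [ "logs-*" ],
      "privileges": [ "read", "read_cross_cluster" ]
    }
  ]
}
--------------------------------------------------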
diff --git a/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc b/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc index 642b31ed64370..ce7263e4d46f3 100644 --- a/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-builtin-privileges.asciidoc @@ -128,6 +128,7 @@ A successful call returns an object with "cluster" and "index" fields. "index", "maintenance", "manage", + "manage_data_stream_lifecycle", "manage_follow_index", "manage_ilm", "manage_leader_index", diff --git a/x-pack/docs/en/rest-api/security/get-users.asciidoc b/x-pack/docs/en/rest-api/security/get-users.asciidoc index 05c4488e524c0..59a390f6f2538 100644 --- a/x-pack/docs/en/rest-api/security/get-users.asciidoc +++ b/x-pack/docs/en/rest-api/security/get-users.asciidoc @@ -35,6 +35,13 @@ For more information about the native realm, see usernames as a comma-separated list. If you omit this parameter, the API retrieves information about all users. +[[security-api-get-user-query-params]] +==== {api-query-parms-title} + +`with_profile_uid`:: +(Optional, boolean) Determines whether to retrieve the <> `uid`, +if it exists, for the users. Defaults to `false`. + [[security-api-get-user-response-body]] ==== {api-response-body-title} @@ -74,6 +81,32 @@ GET /_security/user/jacknich } -------------------------------------------------- +To retrieve the user `profile_uid` as part of the response: + +[source,console] +-------------------------------------------------- +GET /_security/user/jacknich?with_profile_uid=true +-------------------------------------------------- +// TEST[continued] + +[source,console-result] +-------------------------------------------------- +{ + "jacknich": { + "username": "jacknich", + "roles": [ + "admin", "other_role1" + ], + "full_name": "Jack Nicholson", + "email": "jacknich@example.com", + "metadata": { "intelligence" : 7 }, + "enabled": true, + "profile_uid": "u_79HkWkwmnBH5gqFKwoxggWPjEBOur1zLPXQPEl1VBW0_0" + } +} +-------------------------------------------------- + + Omit the username to retrieve all users: [source,console] diff --git a/x-pack/docs/en/rest-api/security/update-cross-cluster-api-key.asciidoc b/x-pack/docs/en/rest-api/security/update-cross-cluster-api-key.asciidoc index 2ce200eb6ac73..f0dfb11f1c98b 100644 --- a/x-pack/docs/en/rest-api/security/update-cross-cluster-api-key.asciidoc +++ b/x-pack/docs/en/rest-api/security/update-cross-cluster-api-key.asciidoc @@ -8,7 +8,7 @@ beta::[] Update Cross-Cluster API key ++++ -Update an existing cross-cluster API Key. +Update an existing cross-cluster API Key that is used for <> access. [[security-api-update-cross-cluster-api-key-request]] diff --git a/x-pack/docs/en/security/authorization/managing-roles.asciidoc b/x-pack/docs/en/security/authorization/managing-roles.asciidoc index 3b144f7baed19..25d1c20471e9c 100644 --- a/x-pack/docs/en/security/authorization/managing-roles.asciidoc +++ b/x-pack/docs/en/security/authorization/managing-roles.asciidoc @@ -31,10 +31,9 @@ A role is defined by the following JSON structure: <4> A list of indices permissions entries. This field is optional (missing `indices` privileges effectively mean no index level permissions). <5> A list of application privilege entries. This field is optional. -// TODO: fix the link to new page of API key based remote clusters <6> beta:[] A list of indices permissions entries for - <>. + <>.
This field is optional (missing `remote_indices` privileges effectively mean no index level permissions for any API key based remote clusters). @@ -168,8 +167,7 @@ no effect, and will not grant any actions in the beta::[] -// TODO: fix the link to new page of API key based remote clusters -For remote clusters configured with the API key based model, remote indices privileges +For <>, remote indices privileges can be used to specify desired indices privileges for matching remote clusters. The final effective index privileges will be an intersection of the remote indices privileges and the <>'s indices privileges. diff --git a/x-pack/docs/en/security/authorization/privileges.asciidoc b/x-pack/docs/en/security/authorization/privileges.asciidoc index 186a45054ecbc..3df15c404fdbe 100644 --- a/x-pack/docs/en/security/authorization/privileges.asciidoc +++ b/x-pack/docs/en/security/authorization/privileges.asciidoc @@ -21,7 +21,7 @@ Privileges to create snapshots for existing repositories. Can also list and view details on existing repositories and snapshots. `cross_cluster_replication`:: -beta:[] Privileges to connect to remote clusters configured with the API key based model +beta:[] Privileges to connect to <> for cross-cluster replication. + -- @@ -32,7 +32,7 @@ to manage cross-cluster API keys. -- `cross_cluster_search`:: -beta:[] Privileges to connect to remote clusters configured with the API key based model +beta:[] Privileges to connect to <> for cross-cluster search. + -- @@ -301,13 +301,14 @@ requires the `manage` privilege as well, on both the index and the aliases names. `cross_cluster_replication`:: -beta:[] Privileges to perform cross-cluster replication for indices located on remote clusters -configured with the API key based model. This privilege should only be used for +beta:[] Privileges to perform cross-cluster replication for indices located on +<>. +This privilege should only be used for the `privileges` field of <>. `cross_cluster_replication_internal`:: -beta:[] Privileges to perform supporting actions for cross-cluster replication from remote clusters -configured with the API key based model. +beta:[] Privileges to perform supporting actions for cross-cluster replication from +<>. + -- NOTE: This privilege should _not_ be directly granted. It is used internally by diff --git a/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc b/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc index edfd7cdb486dd..edbc4c610b272 100644 --- a/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc +++ b/x-pack/docs/en/watcher/example-watches/example-watch-clusterstatus.asciidoc @@ -38,8 +38,8 @@ PUT _watcher/watch/cluster_health_watch Since this watch runs so frequently, don't forget to <> when you're done experimenting. 
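For example, the cleanup mentioned above is a single request (a sketch reusing the `cluster_health_watch` ID created earlier on this page):

[source,console]
--------------------------------------------------
DELETE _watcher/watch/cluster_health_watch
--------------------------------------------------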
-To get the status of your cluster, you can call the Elasticsearch -{ref}//cluster-health.html[cluster health] API: +To get the status of your cluster, you can call the <>: [source,console] -------------------------------------------------- diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java index 3cebd756408fb..03ddfee9681fb 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/boxplot/BoxplotAggregationBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.analytics.boxplot; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -82,10 +83,8 @@ protected AggregationBuilder shallowCopy(AggregatorFactories.Builder factoriesBu public BoxplotAggregationBuilder(StreamInput in) throws IOException { super(in); compression = in.readDouble(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_018)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { executionHint = in.readOptionalWriteable(TDigestExecutionHint::readFrom); - } else if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_014)) { - executionHint = TDigestExecutionHint.readFrom(in); } else { executionHint = TDigestExecutionHint.HIGH_ACCURACY; } @@ -99,10 +98,8 @@ public Set<String> metricNames() { @Override protected void innerWriteTo(StreamOutput out) throws IOException { out.writeDouble(compression); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_018)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeOptionalWriteable(executionHint); - } else if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_014)) { - (executionHint == null ?
TDigestExecutionHint.DEFAULT : executionHint).writeTo(out); } } @@ -202,6 +199,6 @@ public Optional<Set<String>> getOutputFieldNames() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_7_0; + return TransportVersions.V_7_7_0; } } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregationBuilder.java index 6061e2b30297f..5845da9f6e6c5 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/cumulativecardinality/CumulativeCardinalityPipelineAggregationBuilder.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.analytics.cumulativecardinality; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.DocValueFormat; @@ -130,6 +131,6 @@ protected boolean overrideBucketsPath() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_4_0; + return TransportVersions.V_7_4_0; } } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregationBuilder.java index 2cc9e1c7e4b1d..72688dafe3721 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/movingPercentiles/MovingPercentilesPipelineAggregationBuilder.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.analytics.movingPercentiles; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.pipeline.AbstractPipelineAggregationBuilder; @@ -131,6 +132,6 @@ protected boolean overrideBucketsPath() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_9_0; + return TransportVersions.V_7_9_0; } } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java index 0189b55d43b1d..c098d75b29a7b 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/InternalMultiTerms.java @@ -67,7 +67,7 @@ public Bucket( protected Bucket(StreamInput in, List<DocValueFormat> formats, List<KeyConverter> keyConverters, boolean showDocCountError) throws IOException { - terms = in.readList(StreamInput::readGenericValue); + terms = in.readCollectionAsList(StreamInput::readGenericValue); docCount = in.readVLong(); aggregations = InternalAggregations.readFrom(in); this.showDocCountError = showDocCountError; @@ -329,9 +329,9 @@
public InternalMultiTerms(StreamInput in) throws IOException { shardSize = readSize(in); showTermDocCountError = in.readBoolean(); otherDocCount = in.readVLong(); - formats = in.readList(in1 -> in1.readNamedWriteable(DocValueFormat.class)); - keyConverters = in.readList(in1 -> in1.readEnum(KeyConverter.class)); - buckets = in.readList(stream -> new Bucket(stream, formats, keyConverters, showTermDocCountError)); + formats = in.readCollectionAsList(in1 -> in1.readNamedWriteable(DocValueFormat.class)); + keyConverters = in.readCollectionAsList(in1 -> in1.readEnum(KeyConverter.class)); + buckets = in.readCollectionAsList(stream -> new Bucket(stream, formats, keyConverters, showTermDocCountError)); } @Override @@ -344,9 +344,9 @@ protected void doWriteTo(StreamOutput out) throws IOException { writeSize(shardSize, out); out.writeBoolean(showTermDocCountError); out.writeVLong(otherDocCount); - out.writeCollection(formats, StreamOutput::writeNamedWriteable); + out.writeNamedWriteableCollection(formats); out.writeCollection(keyConverters, StreamOutput::writeEnum); - out.writeList(buckets); + out.writeCollection(buckets); } @Override diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilder.java index 1df2f6634ba1d..315a72ca1d645 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregationBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.analytics.multiterms; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; @@ -140,7 +141,7 @@ public MultiTermsAggregationBuilder( public MultiTermsAggregationBuilder(StreamInput in) throws IOException { super(in); - terms = in.readList(MultiValuesSourceFieldConfig::new); + terms = in.readCollectionAsList(MultiValuesSourceFieldConfig::new); order = InternalOrder.Streams.readOrder(in); collectMode = in.readOptionalWriteable(Aggregator.SubAggCollectionMode::readFromStream); bucketCountThresholds = new TermsAggregator.BucketCountThresholds(in); @@ -196,7 +197,7 @@ public BucketCardinality bucketCardinality() { @Override protected final void doWriteTo(StreamOutput out) throws IOException { - out.writeList(terms); + out.writeCollection(terms); order.writeTo(out); out.writeOptionalWriteable(collectMode); bucketCountThresholds.writeTo(out); @@ -438,6 +439,6 @@ public boolean equals(Object obj) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_12_0; + return TransportVersions.V_7_12_0; } } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java index f7cb48028133f..5d6b0ec760acd 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/multiterms/MultiTermsAggregator.java @@ -177,7 +177,7 @@ static BytesRef packKey(List<Object> terms) { */
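// Editorial note (assumption, not part of this change): packKey is expected to write each
// term with StreamOutput#writeGenericValue, mirroring the readCollectionAsList(StreamInput::readGenericValue)
// call below; the two sides must stay symmetric for the pack/unpack round trip to hold.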
static List<Object> unpackTerms(BytesRef termsBytes) { try (StreamInput input = new BytesArray(termsBytes).streamInput()) { - return input.readList(StreamInput::readGenericValue); + return input.readCollectionAsList(StreamInput::readGenericValue); } catch (IOException ex) { throw ExceptionsHelper.convertToRuntime(ex); } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/normalize/NormalizePipelineAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/normalize/NormalizePipelineAggregationBuilder.java index 3975e04de0c76..e61f01abcbedc 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/normalize/NormalizePipelineAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/normalize/NormalizePipelineAggregationBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.analytics.normalize; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.DocValueFormat; @@ -156,6 +157,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_9_0; + return TransportVersions.V_7_9_0; } } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalResetTrackingRate.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalResetTrackingRate.java index b47ef33b4e68f..f3af195bc6fa1 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalResetTrackingRate.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/InternalResetTrackingRate.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.analytics.rate; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -63,7 +63,7 @@ public InternalResetTrackingRate(StreamInput in) throws IOException { this.startTime = in.readLong(); this.endTime = in.readLong(); this.resetCompensation = in.readDouble(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_015)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { this.rateUnit = Rounding.DateTimeUnit.resolve(in.readByte()); } else { this.rateUnit = Rounding.DateTimeUnit.SECOND_OF_MINUTE; @@ -82,7 +82,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeLong(startTime); out.writeLong(endTime); out.writeDouble(resetCompensation); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_015) && rateUnit != null) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020) && rateUnit != null) { out.writeByte(rateUnit.getId()); } else { out.writeByte(Rounding.DateTimeUnit.SECOND_OF_MINUTE.getId()); diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/RateAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/RateAggregationBuilder.java index c0b9650144fc6..37332fa49bfbf 100644 ---
b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/rate/RateAggregationBuilder.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.analytics.rate; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -87,7 +88,7 @@ public RateAggregationBuilder(StreamInput in) throws IOException { } else { rateUnit = null; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) { if (in.readBoolean()) { rateMode = in.readEnum(RateMode.class); } @@ -106,7 +107,7 @@ protected void innerWriteTo(StreamOutput out) throws IOException { } else { out.writeByte((byte) 0); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) { if (rateMode != null) { out.writeBoolean(true); out.writeEnum(rateMode); @@ -215,6 +216,6 @@ public int hashCode() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_10_0; + return TransportVersions.V_7_10_0; } } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/InternalStringStats.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/InternalStringStats.java index f5c1fbbb83bf3..1663a93a52235 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/InternalStringStats.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/InternalStringStats.java @@ -104,7 +104,7 @@ protected final void doWriteTo(StreamOutput out) throws IOException { out.writeVLong(totalLength); out.writeVInt(minLength); out.writeVInt(maxLength); - out.writeMap(charOccurrences, StreamOutput::writeString, StreamOutput::writeLong); + out.writeMap(charOccurrences, StreamOutput::writeLong); } public String getWriteableName() { diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/StringStatsAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/StringStatsAggregationBuilder.java index da02f9bcb7a5d..0be2ac9f24e62 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/StringStatsAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/stringstats/StringStatsAggregationBuilder.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.analytics.stringstats; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -158,6 +159,6 @@ public boolean equals(Object obj) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_6_0; + return TransportVersions.V_7_6_0; } } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/InternalTopMetrics.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/InternalTopMetrics.java index b8ca3c22fbb74..0194e39f47d44 100644 --- 
a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/InternalTopMetrics.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/InternalTopMetrics.java @@ -67,9 +67,9 @@ static InternalTopMetrics buildEmptyAggregation(String name, List<String> metric public InternalTopMetrics(StreamInput in) throws IOException { super(in); sortOrder = SortOrder.readFromStream(in); - metricNames = in.readStringList(); + metricNames = in.readStringCollectionAsList(); size = in.readVInt(); - topMetrics = in.readList(TopMetric::new); + topMetrics = in.readCollectionAsList(TopMetric::new); } @Override @@ -77,7 +77,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { sortOrder.writeTo(out); out.writeStringCollection(metricNames); out.writeVInt(size); - out.writeList(topMetrics); + out.writeCollection(topMetrics); } @Override @@ -268,7 +268,7 @@ static class TopMetric implements Writeable, Comparable<TopMetric> { TopMetric(StreamInput in) throws IOException { sortFormat = in.readNamedWriteable(DocValueFormat.class); sortValue = in.readNamedWriteable(SortValue.class); - metricValues = in.readList(s -> s.readOptionalWriteable(MetricValue::new)); + metricValues = in.readCollectionAsList(s -> s.readOptionalWriteable(MetricValue::new)); } @Override diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java index 931aba50d3eb2..15838a77a96ad 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/topmetrics/TopMetricsAggregationBuilder.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.analytics.topmetrics; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; @@ -152,10 +153,10 @@ public TopMetricsAggregationBuilder( public TopMetricsAggregationBuilder(StreamInput in) throws IOException { super(in); @SuppressWarnings({ "unchecked", "HiddenField" }) - List<SortBuilder<?>> sortBuilders = (List<SortBuilder<?>>) (List) in.readNamedWriteableList(SortBuilder.class); + List<SortBuilder<?>> sortBuilders = (List<SortBuilder<?>>) (List) in.readNamedWriteableCollectionAsList(SortBuilder.class); this.sortBuilders = sortBuilders; this.size = in.readVInt(); - this.metricFields = in.readList(MultiValuesSourceFieldConfig::new); + this.metricFields = in.readCollectionAsList(MultiValuesSourceFieldConfig::new); } @Override @@ -165,9 +166,9 @@ public boolean supportsSampling() { @Override protected void doWriteTo(StreamOutput out) throws IOException { - out.writeNamedWriteableList(sortBuilders); + out.writeNamedWriteableCollection(sortBuilders); out.writeVInt(size); - out.writeList(metricFields); + out.writeCollection(metricFields); } @Override @@ -237,6 +238,6 @@ public Optional<Set<String>> getOutputFieldNames() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_7_0; + return TransportVersions.V_7_7_0; } } diff --git a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/TTestAggregationBuilder.java b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/TTestAggregationBuilder.java index
af367759d5ac6..7137302459c0b 100644 --- a/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/TTestAggregationBuilder.java +++ b/x-pack/plugin/analytics/src/main/java/org/elasticsearch/xpack/analytics/ttest/TTestAggregationBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.analytics.ttest; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.QueryBuilder; @@ -181,6 +182,6 @@ public String getType() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_8_0; + return TransportVersions.V_7_8_0; } } diff --git a/x-pack/plugin/async-search/qa/rest/src/main/java/org/elasticsearch/query/DeprecatedQueryBuilder.java b/x-pack/plugin/async-search/qa/rest/src/main/java/org/elasticsearch/query/DeprecatedQueryBuilder.java index ef9ca6a7763ca..4250a64f8d407 100644 --- a/x-pack/plugin/async-search/qa/rest/src/main/java/org/elasticsearch/query/DeprecatedQueryBuilder.java +++ b/x-pack/plugin/async-search/qa/rest/src/main/java/org/elasticsearch/query/DeprecatedQueryBuilder.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -77,6 +78,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchActionIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchActionIT.java index b988e1ce5e980..7e161c3154fa9 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchActionIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/AsyncSearchActionIT.java @@ -11,6 +11,7 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; @@ -532,7 +533,7 @@ public void testCCSCheckCompatibility() throws Exception { SubmitAsyncSearchRequest request = new SubmitAsyncSearchRequest(new SearchSourceBuilder().query(new DummyQueryBuilder() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersionUtils.getNextVersion(TransportVersion.MINIMUM_CCS_VERSION, true); + return TransportVersionUtils.getNextVersion(TransportVersions.MINIMUM_CCS_VERSION, true); } }), indexName); diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/BlockingQueryBuilder.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/BlockingQueryBuilder.java index 3b0d849b2ddfc..e69e19e4566ea 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/BlockingQueryBuilder.java +++ 
b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/BlockingQueryBuilder.java @@ -12,6 +12,7 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.lucene.search.Queries; @@ -130,7 +131,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } /** diff --git a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java index b97e9a39fa599..7558d76169016 100644 --- a/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java +++ b/x-pack/plugin/async-search/src/internalClusterTest/java/org/elasticsearch/xpack/search/CrossClusterAsyncSearchIT.java @@ -7,7 +7,6 @@ package org.elasticsearch.xpack.search; -import org.apache.lucene.tests.util.LuceneTestCase; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.action.ActionFuture; @@ -16,7 +15,9 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.action.search.SearchAction; import org.elasticsearch.action.search.SearchResponse; +import org.elasticsearch.action.search.SearchType; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -27,6 +28,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.query.MatchAllQueryBuilder; +import org.elasticsearch.index.query.RangeQueryBuilder; import org.elasticsearch.index.shard.SearchOperationListener; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.SearchPlugin; @@ -35,6 +37,7 @@ import org.elasticsearch.search.internal.LegacyReaderContext; import org.elasticsearch.search.internal.ReaderContext; import org.elasticsearch.search.internal.SearchContext; +import org.elasticsearch.search.query.SlowRunningQueryBuilder; import org.elasticsearch.search.query.ThrowingQueryBuilder; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.TaskCancelledException; @@ -60,7 +63,6 @@ import org.hamcrest.Matchers; import org.junit.Before; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; @@ -74,17 +76,18 @@ import java.util.stream.Stream; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; -import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.matchesRegex; import static org.hamcrest.Matchers.not; -@LuceneTestCase.AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/98272") 
public class CrossClusterAsyncSearchIT extends AbstractMultiClustersTestCase { private static final String REMOTE_CLUSTER = "cluster_a"; + private static final long EARLIEST_TIMESTAMP = 1691348810000L; + private static final long LATEST_TIMESTAMP = 1691348820000L; @Override protected Collection<String> remoteClusterAlias() { @@ -143,10 +146,17 @@ public void testClusterDetailsAfterSuccessfulCCS() throws Exception { SearchListenerPlugin.blockQueryPhase(); SubmitAsyncSearchRequest request = new SubmitAsyncSearchRequest(localIndex, REMOTE_CLUSTER + ":" + remoteIndex); - request.setCcsMinimizeRoundtrips(true); + request.setCcsMinimizeRoundtrips(randomBoolean()); request.setWaitForCompletionTimeout(TimeValue.timeValueMillis(1)); request.setKeepOnCompletion(true); - request.getSearchRequest().source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(1000)); + request.getSearchRequest().source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); + if (randomBoolean()) { + request.setBatchedReduceSize(randomIntBetween(2, 256)); + } + boolean dfs = randomBoolean(); + if (dfs) { + request.getSearchRequest().searchType(SearchType.DFS_QUERY_THEN_FETCH); + } AsyncSearchResponse response = submitAsyncSearch(request); assertNotNull(response.getSearchResponse()); @@ -169,12 +179,7 @@ public void testClusterDetailsAfterSuccessfulCCS() throws Exception { SearchListenerPlugin.waitSearchStarted(); SearchListenerPlugin.allowQueryPhase(); - assertBusy(() -> { - AsyncStatusResponse statusResponse = getAsyncStatus(response.getId()); - assertFalse(statusResponse.isRunning()); - assertNotNull(statusResponse.getCompletionStatus()); - }); - + waitForSearchTasksToFinish(); { AsyncSearchResponse finishedResponse = getAsyncSearch(response.getId()); @@ -237,21 +242,32 @@ public void testClusterDetailsAfterSuccessfulCCS() throws Exception { } } - public void testClusterDetailsAfterCCSWithFailuresOnAllShards() throws Exception { + // CCS with a search where the timestamp of the query cannot match so should be SUCCESSFUL with all shards skipped + // during can-match + public void testCCSClusterDetailsWhereAllShardsSkippedInCanMatch() throws Exception { Map<String, Object> testClusterInfo = setupTwoClusters(); String localIndex = (String) testClusterInfo.get("local.index"); String remoteIndex = (String) testClusterInfo.get("remote.index"); - boolean skipUnavailable = (Boolean) testClusterInfo.get("remote.skip_unavailable"); + int localNumShards = (Integer) testClusterInfo.get("local.num_shards"); + int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); SearchListenerPlugin.blockQueryPhase(); SubmitAsyncSearchRequest request = new SubmitAsyncSearchRequest(localIndex, REMOTE_CLUSTER + ":" + remoteIndex); - request.setCcsMinimizeRoundtrips(true); + request.setCcsMinimizeRoundtrips(randomBoolean()); request.setWaitForCompletionTimeout(TimeValue.timeValueMillis(1)); request.setKeepOnCompletion(true); - // shardId -1 means to throw the Exception on all shards, so should result in complete search failure - ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("index corrupted"), -1); - request.getSearchRequest().source(new SearchSourceBuilder().query(queryBuilder).size(10)); + if (randomBoolean()) { + request.setBatchedReduceSize(randomIntBetween(2, 256)); + } + boolean dfs = randomBoolean(); + if (dfs) { + request.getSearchRequest().searchType(SearchType.DFS_QUERY_THEN_FETCH); + } + RangeQueryBuilder rangeQueryBuilder = new
RangeQueryBuilder("@timestamp").from(100).to(2000); + request.getSearchRequest().source(new SearchSourceBuilder().query(rangeQueryBuilder).size(10)); + + boolean minimizeRoundtrips = TransportSearchAction.shouldMinimizeRoundtrips(request.getSearchRequest()); AsyncSearchResponse response = submitAsyncSearch(request); assertNotNull(response.getSearchResponse()); @@ -274,11 +290,78 @@ public void testClusterDetailsAfterCCSWithFailuresOnAllShards() throws Exception SearchListenerPlugin.waitSearchStarted(); SearchListenerPlugin.allowQueryPhase(); - assertBusy(() -> { - AsyncStatusResponse statusResponse = getAsyncStatus(response.getId()); - assertFalse(statusResponse.isRunning()); - assertNotNull(statusResponse.getCompletionStatus()); - }); + waitForSearchTasksToFinish(); + { + AsyncSearchResponse finishedResponse = getAsyncSearch(response.getId()); + assertNotNull(finishedResponse); + + SearchResponse.Clusters clusters = finishedResponse.getSearchResponse().getClusters(); + assertFalse("search cluster results should NOT be marked as partial", clusters.hasPartialResults()); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getSuccessful(), equalTo(2)); + assertThat(clusters.getSkipped(), equalTo(0)); + + SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY).get(); + assertNotNull(localClusterSearchInfo); + SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER).get(); + assertNotNull(remoteClusterSearchInfo); + + assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + if (dfs) { + // no skipped shards locally when DFS_QUERY_THEN_FETCH is used + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + } else { + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(localNumShards - 1)); + } + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.SUCCESSFUL)); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); + if (minimizeRoundtrips) { + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards - 1)); + } else { + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(remoteNumShards)); + } + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + } + } + + public void testClusterDetailsAfterCCSWithFailuresOnAllShards() throws Exception { + Map testClusterInfo = setupTwoClusters(); + String localIndex = (String) testClusterInfo.get("local.index"); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + int localNumShards = (Integer) testClusterInfo.get("local.num_shards"); + int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); + boolean skipUnavailable = (Boolean) testClusterInfo.get("remote.skip_unavailable"); + + SubmitAsyncSearchRequest request = new 
SubmitAsyncSearchRequest(localIndex, REMOTE_CLUSTER + ":" + remoteIndex); + request.setCcsMinimizeRoundtrips(randomBoolean()); + request.setWaitForCompletionTimeout(TimeValue.timeValueMillis(1)); + if (randomBoolean()) { + request.setBatchedReduceSize(randomIntBetween(2, 256)); + } + request.setKeepOnCompletion(true); + boolean dfs = randomBoolean(); + if (dfs) { + request.getSearchRequest().searchType(SearchType.DFS_QUERY_THEN_FETCH); + } + // shardId -1 means to throw the Exception on all shards, so should result in complete search failure + ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("index corrupted"), -1); + request.getSearchRequest().source(new SearchSourceBuilder().query(queryBuilder).size(10)); + + boolean minimizeRoundtrips = TransportSearchAction.shouldMinimizeRoundtrips(request.getSearchRequest()); + + AsyncSearchResponse response = submitAsyncSearch(request); + assertNotNull(response.getSearchResponse()); + + waitForSearchTasksToFinish(); { AsyncSearchResponse finishedResponse = getAsyncSearch(response.getId()); @@ -291,15 +374,7 @@ public void testClusterDetailsAfterCCSWithFailuresOnAllShards() throws Exception SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY).get(); assertNotNull(localClusterSearchInfo); assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.FAILED)); - assertNull(localClusterSearchInfo.getTotalShards()); - assertNull(localClusterSearchInfo.getSuccessfulShards()); - assertNull(localClusterSearchInfo.getSkippedShards()); - assertNull(localClusterSearchInfo.getFailedShards()); - assertThat(localClusterSearchInfo.getFailures().size(), equalTo(1)); - assertNull(localClusterSearchInfo.getTook()); - assertFalse(localClusterSearchInfo.isTimedOut()); - ShardSearchFailure localShardSearchFailure = localClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", localShardSearchFailure.reason().contains("index corrupted")); + assertAllShardsFailed(minimizeRoundtrips, localClusterSearchInfo, localNumShards); SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER).get(); assertNotNull(remoteClusterSearchInfo); @@ -307,15 +382,7 @@ public void testClusterDetailsAfterCCSWithFailuresOnAllShards() throws Exception ? 
SearchResponse.Cluster.Status.SKIPPED : SearchResponse.Cluster.Status.FAILED; assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus)); - assertNull(remoteClusterSearchInfo.getTotalShards()); - assertNull(remoteClusterSearchInfo.getSuccessfulShards()); - assertNull(remoteClusterSearchInfo.getSkippedShards()); - assertNull(remoteClusterSearchInfo.getFailedShards()); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); - assertNull(remoteClusterSearchInfo.getTook()); - assertFalse(remoteClusterSearchInfo.isTimedOut()); - ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + assertAllShardsFailed(minimizeRoundtrips, remoteClusterSearchInfo, remoteNumShards); } // check that the async_search/status response includes the same cluster details { @@ -328,15 +395,7 @@ public void testClusterDetailsAfterCCSWithFailuresOnAllShards() throws Exception SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY).get(); assertNotNull(localClusterSearchInfo); assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.FAILED)); - assertNull(localClusterSearchInfo.getTotalShards()); - assertNull(localClusterSearchInfo.getSuccessfulShards()); - assertNull(localClusterSearchInfo.getSkippedShards()); - assertNull(localClusterSearchInfo.getFailedShards()); - assertThat(localClusterSearchInfo.getFailures().size(), equalTo(1)); - assertNull(localClusterSearchInfo.getTook()); - assertFalse(localClusterSearchInfo.isTimedOut()); - ShardSearchFailure localShardSearchFailure = localClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", localShardSearchFailure.reason().contains("index corrupted")); + assertAllShardsFailed(minimizeRoundtrips, localClusterSearchInfo, localNumShards); SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER).get(); assertNotNull(remoteClusterSearchInfo); @@ -344,15 +403,7 @@ public void testClusterDetailsAfterCCSWithFailuresOnAllShards() throws Exception ? 
SearchResponse.Cluster.Status.SKIPPED : SearchResponse.Cluster.Status.FAILED; assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus)); - assertNull(remoteClusterSearchInfo.getTotalShards()); - assertNull(remoteClusterSearchInfo.getSuccessfulShards()); - assertNull(remoteClusterSearchInfo.getSkippedShards()); - assertNull(remoteClusterSearchInfo.getFailedShards()); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); - assertNull(remoteClusterSearchInfo.getTook()); - assertFalse(remoteClusterSearchInfo.isTimedOut()); - ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + assertAllShardsFailed(minimizeRoundtrips, remoteClusterSearchInfo, remoteNumShards); } } @@ -366,9 +417,16 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneShardOnly() throws Except SearchListenerPlugin.blockQueryPhase(); SubmitAsyncSearchRequest request = new SubmitAsyncSearchRequest(localIndex, REMOTE_CLUSTER + ":" + remoteIndex); - request.setCcsMinimizeRoundtrips(true); + request.setCcsMinimizeRoundtrips(randomBoolean()); request.setWaitForCompletionTimeout(TimeValue.timeValueMillis(1)); request.setKeepOnCompletion(true); + if (randomBoolean()) { + request.setBatchedReduceSize(randomIntBetween(2, 256)); + } + boolean dfs = randomBoolean(); + if (dfs) { + request.getSearchRequest().searchType(SearchType.DFS_QUERY_THEN_FETCH); + } // shardId 0 means to throw the Exception only on shard 0; all others should work ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("index corrupted"), 0); request.getSearchRequest().source(new SearchSourceBuilder().query(queryBuilder).size(10)); @@ -394,15 +452,10 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneShardOnly() throws Except SearchListenerPlugin.waitSearchStarted(); SearchListenerPlugin.allowQueryPhase(); - assertBusy(() -> { - AsyncStatusResponse statusResponse = getAsyncStatus(response.getId()); - assertFalse(statusResponse.isRunning()); - assertNotNull(statusResponse.getCompletionStatus()); - }); + waitForSearchTasksToFinish(); { AsyncSearchResponse finishedResponse = getAsyncSearch(response.getId()); - SearchResponse.Clusters clusters = finishedResponse.getSearchResponse().getClusters(); assertThat(clusters.getTotal(), equalTo(2)); assertThat(clusters.getSuccessful(), equalTo(2)); @@ -471,14 +524,23 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneClusterOnly() throws Exce String localIndex = (String) testClusterInfo.get("local.index"); String remoteIndex = (String) testClusterInfo.get("remote.index"); int localNumShards = (Integer) testClusterInfo.get("local.num_shards"); + int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); boolean skipUnavailable = (Boolean) testClusterInfo.get("remote.skip_unavailable"); SearchListenerPlugin.blockQueryPhase(); SubmitAsyncSearchRequest request = new SubmitAsyncSearchRequest(localIndex, REMOTE_CLUSTER + ":" + remoteIndex); - request.setCcsMinimizeRoundtrips(true); + request.setCcsMinimizeRoundtrips(randomBoolean()); request.setWaitForCompletionTimeout(TimeValue.timeValueMillis(1)); request.setKeepOnCompletion(true); + if (randomBoolean()) { + request.setBatchedReduceSize(randomIntBetween(2, 256)); + } + boolean dfs = randomBoolean(); + if (dfs) { + request.getSearchRequest().searchType(SearchType.DFS_QUERY_THEN_FETCH); + } + // throw Exception of all 
shards of remoteIndex, but against localIndex ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder( randomLong(), @@ -487,6 +549,8 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneClusterOnly() throws Exce ); request.getSearchRequest().source(new SearchSourceBuilder().query(queryBuilder).size(10)); + boolean minimizeRoundtrips = TransportSearchAction.shouldMinimizeRoundtrips(request.getSearchRequest()); + AsyncSearchResponse response = submitAsyncSearch(request); assertNotNull(response.getSearchResponse()); assertTrue(response.isRunning()); @@ -507,11 +571,7 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneClusterOnly() throws Exce SearchListenerPlugin.waitSearchStarted(); SearchListenerPlugin.allowQueryPhase(); - assertBusy(() -> { - AsyncStatusResponse statusResponse = getAsyncStatus(response.getId()); - assertFalse(statusResponse.isRunning()); - assertNotNull(statusResponse.getCompletionStatus()); - }); + waitForSearchTasksToFinish(); { AsyncSearchResponse finishedResponse = getAsyncSearch(response.getId()); @@ -537,11 +597,19 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneClusterOnly() throws Exce ? SearchResponse.Cluster.Status.SKIPPED : SearchResponse.Cluster.Status.FAILED; assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus)); - assertNull(remoteClusterSearchInfo.getTotalShards()); - assertNull(remoteClusterSearchInfo.getSuccessfulShards()); - assertNull(remoteClusterSearchInfo.getSkippedShards()); - assertNull(remoteClusterSearchInfo.getFailedShards()); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); + if (minimizeRoundtrips) { + assertNull(remoteClusterSearchInfo.getTotalShards()); + assertNull(remoteClusterSearchInfo.getSuccessfulShards()); + assertNull(remoteClusterSearchInfo.getSkippedShards()); + assertNull(remoteClusterSearchInfo.getFailedShards()); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); + } else { + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(remoteNumShards)); + } assertNull(remoteClusterSearchInfo.getTook()); assertFalse(remoteClusterSearchInfo.isTimedOut()); ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); @@ -571,11 +639,15 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneClusterOnly() throws Exce ? 
SearchResponse.Cluster.Status.SKIPPED : SearchResponse.Cluster.Status.FAILED; assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus)); - assertNull(remoteClusterSearchInfo.getTotalShards()); - assertNull(remoteClusterSearchInfo.getSuccessfulShards()); - assertNull(remoteClusterSearchInfo.getSkippedShards()); - assertNull(remoteClusterSearchInfo.getFailedShards()); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); + if (minimizeRoundtrips) { + assertNull(remoteClusterSearchInfo.getTotalShards()); + assertNull(remoteClusterSearchInfo.getFailedShards()); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); + } else { + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(remoteNumShards)); + } assertNull(remoteClusterSearchInfo.getTook()); assertFalse(remoteClusterSearchInfo.isTimedOut()); ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); @@ -583,6 +655,105 @@ public void testClusterDetailsAfterCCSWithFailuresOnOneClusterOnly() throws Exce } } + public void testCCSWithSearchTimeout() throws Exception { + Map testClusterInfo = setupTwoClusters(); + String localIndex = (String) testClusterInfo.get("local.index"); + String remoteIndex = (String) testClusterInfo.get("remote.index"); + int localNumShards = (Integer) testClusterInfo.get("local.num_shards"); + int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); + + TimeValue searchTimeout = new TimeValue(100, TimeUnit.MILLISECONDS); + // query builder that will sleep for the specified amount of time in the query phase + SlowRunningQueryBuilder slowRunningQueryBuilder = new SlowRunningQueryBuilder(searchTimeout.millis() * 5); + SearchSourceBuilder sourceBuilder = new SearchSourceBuilder().query(slowRunningQueryBuilder).timeout(searchTimeout); + + SubmitAsyncSearchRequest request = new SubmitAsyncSearchRequest(localIndex, REMOTE_CLUSTER + ":" + remoteIndex); + request.setCcsMinimizeRoundtrips(randomBoolean()); + request.getSearchRequest().source(sourceBuilder); + if (randomBoolean()) { + request.setBatchedReduceSize(randomIntBetween(2, 256)); + } + request.setWaitForCompletionTimeout(TimeValue.timeValueMillis(1)); + request.getSearchRequest().allowPartialSearchResults(true); + request.setKeepOnCompletion(true); + boolean dfs = randomBoolean(); + if (dfs) { + request.getSearchRequest().searchType(SearchType.DFS_QUERY_THEN_FETCH); + } + + AsyncSearchResponse response = submitAsyncSearch(request); + assertNotNull(response.getSearchResponse()); + + waitForSearchTasksToFinish(); + + { + AsyncSearchResponse finishedResponse = getAsyncSearch(response.getId()); + assertTrue(finishedResponse.getSearchResponse().isTimedOut()); + + SearchResponse.Clusters clusters = finishedResponse.getSearchResponse().getClusters(); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getSuccessful(), equalTo(2)); + assertThat(clusters.getSkipped(), equalTo(0)); + + SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY).get(); + assertNotNull(localClusterSearchInfo); + // PARTIAL expected since timedOut=true + assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL)); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + 
assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + assertTrue(localClusterSearchInfo.isTimedOut()); + + SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER).get(); + assertNotNull(remoteClusterSearchInfo); + // PARTIAL expected since timedOut=true + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL)); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + assertTrue(remoteClusterSearchInfo.isTimedOut()); + } + // check that the async_search/status response includes the same cluster details + { + AsyncStatusResponse statusResponse = getAsyncStatus(response.getId()); + + SearchResponse.Clusters clusters = statusResponse.getClusters(); + assertThat(clusters.getTotal(), equalTo(2)); + assertThat(clusters.getSuccessful(), equalTo(2)); + assertThat(clusters.getSkipped(), equalTo(0)); + + SearchResponse.Cluster localClusterSearchInfo = clusters.getCluster(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY).get(); + assertNotNull(localClusterSearchInfo); + // PARTIAL expected since timedOut=true + assertThat(localClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL)); + assertThat(localClusterSearchInfo.getTotalShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSuccessfulShards(), equalTo(localNumShards)); + assertThat(localClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(localClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(localClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + assertTrue(localClusterSearchInfo.isTimedOut()); + + SearchResponse.Cluster remoteClusterSearchInfo = clusters.getCluster(REMOTE_CLUSTER).get(); + assertNotNull(remoteClusterSearchInfo); + // PARTIAL expected since timedOut=true + assertThat(remoteClusterSearchInfo.getStatus(), equalTo(SearchResponse.Cluster.Status.PARTIAL)); + assertThat(remoteClusterSearchInfo.getTotalShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSuccessfulShards(), equalTo(remoteNumShards)); + assertThat(remoteClusterSearchInfo.getSkippedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailedShards(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(0)); + assertThat(remoteClusterSearchInfo.getTook().millis(), greaterThanOrEqualTo(0L)); + assertTrue(remoteClusterSearchInfo.isTimedOut()); + } + } + public void testRemoteClusterOnlyCCSSuccessfulResult() throws Exception { // for remote-only queries, we can't use the SearchListenerPlugin since that listens for search // stage on the local cluster, so we only test final state of the search response @@ -594,19 +765,22 @@ public void testRemoteClusterOnlyCCSSuccessfulResult() throws
Exception { // search only the remote cluster SubmitAsyncSearchRequest request = new SubmitAsyncSearchRequest(REMOTE_CLUSTER + ":" + remoteIndex); - request.setCcsMinimizeRoundtrips(true); + request.setCcsMinimizeRoundtrips(randomBoolean()); request.setWaitForCompletionTimeout(TimeValue.timeValueMillis(1)); request.setKeepOnCompletion(true); - request.getSearchRequest().source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(1000)); + if (randomBoolean()) { + request.setBatchedReduceSize(randomIntBetween(2, 256)); + } + boolean dfs = randomBoolean(); + if (dfs) { + request.getSearchRequest().searchType(SearchType.DFS_QUERY_THEN_FETCH); + } + request.getSearchRequest().source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); AsyncSearchResponse response = submitAsyncSearch(request); assertNotNull(response.getSearchResponse()); - assertBusy(() -> { - AsyncStatusResponse statusResponse = getAsyncStatus(response.getId()); - assertFalse(statusResponse.isRunning()); - assertNotNull(statusResponse.getCompletionStatus()); - }); + waitForSearchTasksToFinish(); { AsyncSearchResponse finishedResponse = getAsyncSearch(response.getId()); @@ -664,22 +838,24 @@ public void testRemoteClusterOnlyCCSWithFailuresOnOneShardOnly() throws Exceptio int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); SubmitAsyncSearchRequest request = new SubmitAsyncSearchRequest(REMOTE_CLUSTER + ":" + remoteIndex); - request.setCcsMinimizeRoundtrips(true); + request.setCcsMinimizeRoundtrips(randomBoolean()); request.setWaitForCompletionTimeout(TimeValue.timeValueMillis(1)); request.setKeepOnCompletion(true); + if (randomBoolean()) { + request.setBatchedReduceSize(randomIntBetween(2, 256)); + } + boolean dfs = randomBoolean(); + if (dfs) { + request.getSearchRequest().searchType(SearchType.DFS_QUERY_THEN_FETCH); + } // shardId 0 means to throw the Exception only on shard 0; all others should work ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("index corrupted"), 0); request.getSearchRequest().source(new SearchSourceBuilder().query(queryBuilder).size(10)); AsyncSearchResponse response = submitAsyncSearch(request); assertNotNull(response.getSearchResponse()); - assertTrue(response.isRunning()); - assertBusy(() -> { - AsyncStatusResponse statusResponse = getAsyncStatus(response.getId()); - assertFalse(statusResponse.isRunning()); - assertNotNull(statusResponse.getCompletionStatus()); - }); + waitForSearchTasksToFinish(); { AsyncSearchResponse finishedResponse = getAsyncSearch(response.getId()); @@ -734,26 +910,31 @@ public void testRemoteClusterOnlyCCSWithFailuresOnAllShards() throws Exception { Map testClusterInfo = setupTwoClusters(); String remoteIndex = (String) testClusterInfo.get("remote.index"); + int remoteNumShards = (Integer) testClusterInfo.get("remote.num_shards"); boolean skipUnavailable = (Boolean) testClusterInfo.get("remote.skip_unavailable"); SubmitAsyncSearchRequest request = new SubmitAsyncSearchRequest(REMOTE_CLUSTER + ":" + remoteIndex); - request.setCcsMinimizeRoundtrips(true); + request.setCcsMinimizeRoundtrips(randomBoolean()); request.setWaitForCompletionTimeout(TimeValue.timeValueMillis(1)); request.setKeepOnCompletion(true); + if (randomBoolean()) { + request.setBatchedReduceSize(randomIntBetween(2, 256)); + } + boolean dfs = randomBoolean(); + if (dfs) { + request.getSearchRequest().searchType(SearchType.DFS_QUERY_THEN_FETCH); + } + // shardId -1 means to throw the Exception on all shards, so 
should result in complete search failure ThrowingQueryBuilder queryBuilder = new ThrowingQueryBuilder(randomLong(), new IllegalStateException("index corrupted"), -1); request.getSearchRequest().source(new SearchSourceBuilder().query(queryBuilder).size(10)); + boolean minimizeRoundtrips = TransportSearchAction.shouldMinimizeRoundtrips(request.getSearchRequest()); + AsyncSearchResponse response = submitAsyncSearch(request); assertNotNull(response.getSearchResponse()); - assertTrue(response.isRunning()); - - assertBusy(() -> { - AsyncStatusResponse statusResponse = getAsyncStatus(response.getId()); - assertFalse(statusResponse.isRunning()); - assertNotNull(statusResponse.getCompletionStatus()); - }); + waitForSearchTasksToFinish(); { AsyncSearchResponse finishedResponse = getAsyncSearch(response.getId()); @@ -770,15 +951,7 @@ public void testRemoteClusterOnlyCCSWithFailuresOnAllShards() throws Exception { ? SearchResponse.Cluster.Status.SKIPPED : SearchResponse.Cluster.Status.FAILED; assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus)); - assertNull(remoteClusterSearchInfo.getTotalShards()); - assertNull(remoteClusterSearchInfo.getSuccessfulShards()); - assertNull(remoteClusterSearchInfo.getSkippedShards()); - assertNull(remoteClusterSearchInfo.getFailedShards()); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); - assertNull(remoteClusterSearchInfo.getTook()); - assertFalse(remoteClusterSearchInfo.isTimedOut()); - ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + assertAllShardsFailed(minimizeRoundtrips, remoteClusterSearchInfo, remoteNumShards); } // check that the async_search/status response includes the same cluster details { @@ -796,15 +969,7 @@ public void testRemoteClusterOnlyCCSWithFailuresOnAllShards() throws Exception { ? 
SearchResponse.Cluster.Status.SKIPPED : SearchResponse.Cluster.Status.FAILED; assertThat(remoteClusterSearchInfo.getStatus(), equalTo(expectedStatus)); - assertNull(remoteClusterSearchInfo.getTotalShards()); - assertNull(remoteClusterSearchInfo.getSuccessfulShards()); - assertNull(remoteClusterSearchInfo.getSkippedShards()); - assertNull(remoteClusterSearchInfo.getFailedShards()); - assertThat(remoteClusterSearchInfo.getFailures().size(), equalTo(1)); - assertNull(remoteClusterSearchInfo.getTook()); - assertFalse(remoteClusterSearchInfo.isTimedOut()); - ShardSearchFailure remoteShardSearchFailure = remoteClusterSearchInfo.getFailures().get(0); - assertTrue("should have 'index corrupted' in reason", remoteShardSearchFailure.reason().contains("index corrupted")); + assertAllShardsFailed(minimizeRoundtrips, remoteClusterSearchInfo, remoteNumShards); } } @@ -819,8 +984,11 @@ public void testCancelViaTasksAPI() throws Exception { request.setCcsMinimizeRoundtrips(randomBoolean()); request.setWaitForCompletionTimeout(TimeValue.timeValueMillis(1)); request.setKeepOnCompletion(true); + if (randomBoolean()) { + request.setBatchedReduceSize(randomIntBetween(2, 256)); + } request.getSearchRequest().allowPartialSearchResults(false); - request.getSearchRequest().source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(1000)); + request.getSearchRequest().source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); AsyncSearchResponse response = submitAsyncSearch(request); assertNotNull(response.getSearchResponse()); @@ -904,18 +1072,8 @@ public void testCancelViaTasksAPI() throws Exception { } assertBusy(() -> assertTrue(cancelFuture.isDone())); - assertBusy(() -> { - final Iterable transportServices = cluster(REMOTE_CLUSTER).getInstances(TransportService.class); - for (TransportService transportService : transportServices) { - assertThat(transportService.getTaskManager().getBannedTaskIds(), Matchers.empty()); - } - }); - // wait until search status endpoint reports it as completed - assertBusy(() -> { - AsyncStatusResponse statusResponseAfterCompletion = getAsyncStatus(response.getId()); - assertNotNull(statusResponseAfterCompletion.getCompletionStatus()); - }); + waitForSearchTasksToFinish(); AsyncStatusResponse statusResponseAfterCompletion = getAsyncStatus(response.getId()); assertTrue(statusResponseAfterCompletion.isPartial()); @@ -938,7 +1096,7 @@ public void testCancelViaTasksAPI() throws Exception { ChunkedToXContent.wrapAsToXContent(searchResponseAfterCompletion) .toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS) ); - assertThat(json, containsString("task cancelled [by user request]")); + assertThat(json, matchesRegex(".*task (was)?\s*cancelled.*")); } public void testCancelViaAsyncSearchDelete() throws Exception { @@ -952,8 +1110,11 @@ public void testCancelViaAsyncSearchDelete() throws Exception { request.setCcsMinimizeRoundtrips(randomBoolean()); request.setWaitForCompletionTimeout(TimeValue.timeValueMillis(1)); request.setKeepOnCompletion(true); + if (randomBoolean()) { + request.setBatchedReduceSize(randomIntBetween(2, 256)); + } request.getSearchRequest().allowPartialSearchResults(false); - request.getSearchRequest().source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(1000)); + request.getSearchRequest().source(new SearchSourceBuilder().query(new MatchAllQueryBuilder()).size(10)); AsyncSearchResponse response = submitAsyncSearch(request); assertNotNull(response.getSearchResponse()); @@ -1031,16 +1192,11 @@ public void 
testCancelViaAsyncSearchDelete() throws Exception { SearchListenerPlugin.allowQueryPhase(); } + waitForSearchTasksToFinish(); + assertBusy(() -> expectThrows(ExecutionException.class, () -> getAsyncStatus(response.getId()))); - assertBusy(() -> { - final Iterable<TransportService> transportServices = cluster(REMOTE_CLUSTER).getInstances(TransportService.class); - for (TransportService transportService : transportServices) { - assertThat(transportService.getTaskManager().getBannedTaskIds(), Matchers.empty()); - } - }); } - @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/97286") public void testCancellationViaTimeoutWithAllowPartialResultsSetToFalse() throws Exception { Map<String, Object> testClusterInfo = setupTwoClusters(); String localIndex = (String) testClusterInfo.get("local.index"); @@ -1056,6 +1212,9 @@ public void testCancellationViaTimeoutWithAllowPartialResultsSetToFalse() throws SubmitAsyncSearchRequest request = new SubmitAsyncSearchRequest(localIndex, REMOTE_CLUSTER + ":" + remoteIndex); request.setCcsMinimizeRoundtrips(randomBoolean()); request.getSearchRequest().source(sourceBuilder); + if (randomBoolean()) { + request.setBatchedReduceSize(randomIntBetween(2, 256)); + } request.setWaitForCompletionTimeout(TimeValue.timeValueMillis(1)); request.getSearchRequest().allowPartialSearchResults(false); request.setWaitForCompletionTimeout(TimeValue.timeValueMillis(1)); @@ -1102,26 +1261,24 @@ public void testCancellationViaTimeoutWithAllowPartialResultsSetToFalse() throws // query phase has begun, so wait for query failure (due to timeout) SearchListenerPlugin.waitQueryFailure(); - // wait for the async_search task to be cancelled or unregistered + // wait for search tasks to complete and be unregistered assertBusy(() -> { - ListTasksResponse taskResponses = client().admin().cluster().prepareListTasks().setDetailed(true).get(); - List<TaskInfo> asyncSearchTaskInfos = new ArrayList<>(); - for (TaskInfo task : taskResponses.getTasks()) { - if (task.action().contains("search")) { - if (task.description().contains("async_search{indices[")) { - asyncSearchTaskInfos.add(task); - } - } - } - - if (asyncSearchTaskInfos.size() > 0) { - // if still present, and it is cancelled, then we can proceed with the test - assertTrue(asyncSearchTaskInfos.get(0).cancelled()); - } - // if not present, then it has been unregistered and the async search should no longer be running, so can proceed - }, 30, TimeUnit.SECONDS); + ListTasksResponse listTasksResponse = client(LOCAL_CLUSTER).admin() + .cluster() + .prepareListTasks() + .setActions(SearchAction.INSTANCE.name()) + .get(); + List<TaskInfo> tasks = listTasksResponse.getTasks(); + assertThat(tasks.size(), equalTo(0)); - assertBusy(() -> { assertFalse(getAsyncStatus(response.getId()).isRunning()); }); + ListTasksResponse remoteTasksResponse = client(REMOTE_CLUSTER).admin() + .cluster() + .prepareListTasks() + .setActions(SearchAction.INSTANCE.name()) + .get(); + List<TaskInfo> remoteTasks = remoteTasksResponse.getTasks(); + assertThat(remoteTasks.size(), equalTo(0)); + }); AsyncStatusResponse statusResponse = getAsyncStatus(response.getId()); assertFalse(statusResponse.isRunning()); @@ -1129,6 +1286,28 @@ public void testCancellationViaTimeoutWithAllowPartialResultsSetToFalse() throws assertEquals(0, statusResponse.getSkippedShards()); assertThat(statusResponse.getFailedShards(), greaterThanOrEqualTo(1)); + waitForSearchTasksToFinish(); + } +
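The waitForSearchTasksToFinish() helper defined next centralizes what each test previously did with ad-hoc assertBusy blocks: it polls the list-tasks API on both the local and the remote cluster until no search-action tasks remain, then verifies that no banned task ids linger in the remote cluster's task manager. A sketch of the intended call pattern at the end of a test (assumed usage, composed only from calls that appear in the hunks above):

    SearchListenerPlugin.allowQueryPhase();   // release the blocked query phase
    waitForSearchTasksToFinish();             // search tasks drained on both clusters
    AsyncStatusResponse statusResponse = getAsyncStatus(response.getId());
    assertFalse(statusResponse.isRunning());  // async search has reached a terminal state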
+ private void waitForSearchTasksToFinish() throws Exception { + assertBusy(() -> { + ListTasksResponse listTasksResponse = client(LOCAL_CLUSTER).admin() + .cluster() + .prepareListTasks() + .setActions(SearchAction.INSTANCE.name()) + .get(); + List<TaskInfo> tasks = listTasksResponse.getTasks(); + assertThat(tasks.size(), equalTo(0)); + + ListTasksResponse remoteTasksResponse = client(REMOTE_CLUSTER).admin() + .cluster() + .prepareListTasks() + .setActions(SearchAction.INSTANCE.name()) + .get(); + List<TaskInfo> remoteTasks = remoteTasksResponse.getTasks(); + assertThat(remoteTasks.size(), equalTo(0)); + }); + assertBusy(() -> { final Iterable<TransportService> transportServices = cluster(REMOTE_CLUSTER).getInstances(TransportService.class); for (TransportService transportService : transportServices) { @@ -1137,6 +1316,26 @@ public void testCancellationViaTimeoutWithAllowPartialResultsSetToFalse() throws }); } + private static void assertAllShardsFailed(boolean minimizeRoundtrips, SearchResponse.Cluster cluster, int numShards) { + if (minimizeRoundtrips) { + assertNull(cluster.getTotalShards()); + assertNull(cluster.getSuccessfulShards()); + assertNull(cluster.getSkippedShards()); + assertNull(cluster.getFailedShards()); + assertThat(cluster.getFailures().size(), equalTo(1)); + } else { + assertThat(cluster.getTotalShards(), equalTo(numShards)); + assertThat(cluster.getSuccessfulShards(), equalTo(0)); + assertThat(cluster.getSkippedShards(), equalTo(0)); + assertThat(cluster.getFailedShards(), equalTo(numShards)); + assertThat(cluster.getFailures().size(), equalTo(numShards)); + } + assertNull(cluster.getTook()); + assertFalse(cluster.isTimedOut()); + ShardSearchFailure shardSearchFailure = cluster.getFailures().get(0); + assertTrue("should have 'index corrupted' in reason", shardSearchFailure.reason().contains("index corrupted")); + } + protected AsyncSearchResponse submitAsyncSearch(SubmitAsyncSearchRequest request) throws ExecutionException, InterruptedException { return client(LOCAL_CLUSTER).execute(SubmitAsyncSearchAction.INSTANCE, request).get(); } @@ -1155,23 +1354,31 @@ protected AcknowledgedResponse deleteAsyncSearch(String id) throws ExecutionExce private Map<String, Object> setupTwoClusters() { String localIndex = "demo"; - int numShardsLocal = randomIntBetween(3, 6); - Settings localSettings = indexSettings(numShardsLocal, 0).build(); - assertAcked(client(LOCAL_CLUSTER).admin().indices().prepareCreate(localIndex).setSettings(localSettings)); + int numShardsLocal = randomIntBetween(2, 12); + Settings localSettings = indexSettings(numShardsLocal, randomIntBetween(0, 1)).build(); + assertAcked( + client(LOCAL_CLUSTER).admin() + .indices() + .prepareCreate(localIndex) + .setSettings(localSettings) + .setMapping("@timestamp", "type=date", "f", "type=text") + ); indexDocs(client(LOCAL_CLUSTER), localIndex); String remoteIndex = "prod"; - int numShardsRemote = randomIntBetween(3, 6); + int numShardsRemote = randomIntBetween(2, 12); final InternalTestCluster remoteCluster = cluster(REMOTE_CLUSTER); remoteCluster.ensureAtLeastNumDataNodes(randomIntBetween(1, 3)); final Settings.Builder remoteSettings = Settings.builder(); remoteSettings.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShardsRemote); + remoteSettings.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, 1)); assertAcked( client(REMOTE_CLUSTER).admin() .indices() .prepareCreate(remoteIndex) - .setSettings(Settings.builder().put(remoteSettings.build()).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)) + .setSettings(Settings.builder().put(remoteSettings.build())) + .setMapping("@timestamp", "type=date", "f", "type=text") ); assertFalse( client(REMOTE_CLUSTER).admin() @@ -1200,9 +1407,13 @@ private Map<String, Object>
setupTwoClusters() { } private int indexDocs(Client client, String index) { - int numDocs = between(1, 10); + int numDocs = between(500, 1200); for (int i = 0; i < numDocs; i++) { - client.prepareIndex(index).setSource("f", "v").get(); + long ts = EARLIEST_TIMESTAMP + i; + if (i == numDocs - 1) { + ts = LATEST_TIMESTAMP; + } + client.prepareIndex(index).setSource("f", "v", "@timestamp", ts).get(); } client.admin().indices().prepareRefresh(index).get(); return numDocs; diff --git a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java index 2f3e5409642f7..d445a012ecee9 100644 --- a/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java +++ b/x-pack/plugin/async-search/src/main/java/org/elasticsearch/xpack/search/AsyncSearchTask.java @@ -13,6 +13,7 @@ import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksRequest; import org.elasticsearch.action.admin.cluster.node.tasks.cancel.CancelTasksResponse; +import org.elasticsearch.action.search.CCSSingleCoordinatorSearchProgressListener; import org.elasticsearch.action.search.SearchProgressActionListener; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; @@ -20,11 +21,13 @@ import org.elasticsearch.action.search.SearchShard; import org.elasticsearch.action.search.SearchTask; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.client.internal.Client; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.SearchShardTarget; import org.elasticsearch.search.aggregations.AggregationReduceContext; import org.elasticsearch.search.aggregations.InternalAggregations; +import org.elasticsearch.search.query.QuerySearchResult; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskManager; import org.elasticsearch.threadpool.Scheduler.Cancellable; @@ -237,7 +240,7 @@ private void internalAddCompletionListener(ActionListener l removeCompletionListener(id); listener.onResponse(getResponseWithHeaders()); } - }, waitForCompletion, "generic"); + }, waitForCompletion, threadPool.generic()); } catch (Exception exc) { listener.onFailure(exc); return; @@ -367,20 +370,32 @@ public static AsyncStatusResponse getStatusResponse(AsyncSearchTask asyncTask) { class Listener extends SearchProgressActionListener { + // needed when there's a single coordinator for all CCS search phases (minimize_roundtrips=false) + private CCSSingleCoordinatorSearchProgressListener delegate; + @Override - protected void onQueryResult(int shardIndex) { + protected void onQueryResult(int shardIndex, QuerySearchResult queryResult) { checkCancellation(); + if (delegate != null) { + delegate.onQueryResult(shardIndex, queryResult); + } } @Override protected void onFetchResult(int shardIndex) { checkCancellation(); + if (delegate != null) { + delegate.onFetchResult(shardIndex); + } } @Override protected void onQueryFailure(int shardIndex, SearchShardTarget shardTarget, Exception exc) { // best effort to cancel expired tasks checkCancellation(); + if (delegate != null) { + delegate.onQueryFailure(shardIndex, shardTarget, exc); + } searchResponse.get() .addQueryFailure( shardIndex, @@ -401,11 +416,27 @@ protected void onFetchFailure(int shardIndex, SearchShardTarget shardTarget, Exc 
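The Listener hunks above and below add an optional delegate to AsyncSearchTask.Listener: when minimize_roundtrips is false (a single coordinator drives all CCS search phases), every progress callback is forwarded to a CCSSingleCoordinatorSearchProgressListener so per-cluster shard accounting stays current. The delegate is created in onListShards, which the new javadoc notes is guaranteed to be the first callback; roughly, the guard looks like this (condensed from the onListShards hunk below):

    // onListShards runs before any other progress callback, so the delegate
    // is safely published before onQueryResult/onPartialReduce can observe it
    if (ccsMinimizeRoundtrips == false && clusters.hasClusterObjects()) {
        delegate = new CCSSingleCoordinatorSearchProgressListener();
        delegate.onListShards(shards, skipped, clusters, fetchPhase, timeProvider);
    }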
// in which case the final response already includes results as well as shard fetch failures) } + /** + * onListShards is guaranteed to be the first SearchProgressListener method called and + * the search will not progress until this returns, so this is a safe place to initialize state + * that is needed for handling subsequent callbacks. + */ @Override - protected void onListShards(List<SearchShard> shards, List<SearchShard> skipped, Clusters clusters, boolean fetchPhase) { + protected void onListShards( + List<SearchShard> shards, + List<SearchShard> skipped, + Clusters clusters, + boolean fetchPhase, + TransportSearchAction.SearchTimeProvider timeProvider + ) { // best effort to cancel expired tasks checkCancellation(); + assert clusters.isCcsMinimizeRoundtrips() != null : "CCS minimize_roundtrips value must be set in this context"; ccsMinimizeRoundtrips = clusters.isCcsMinimizeRoundtrips(); + if (ccsMinimizeRoundtrips == false && clusters.hasClusterObjects()) { + delegate = new CCSSingleCoordinatorSearchProgressListener(); + delegate.onListShards(shards, skipped, clusters, fetchPhase, timeProvider); + } searchResponse.compareAndSet( null, new MutableSearchResponse(shards.size() + skipped.size(), skipped.size(), clusters, threadPool.getThreadContext()) @@ -417,6 +448,9 @@ protected void onListShards(List<SearchShard> shards, List<SearchShard> skipped, public void onPartialReduce(List<SearchShard> shards, TotalHits totalHits, InternalAggregations aggregations, int reducePhase) { // best effort to cancel expired tasks checkCancellation(); + if (delegate != null) { + delegate.onPartialReduce(shards, totalHits, aggregations, reducePhase); + } // The way that the MutableSearchResponse will build the aggs. Supplier<InternalAggregations> reducedAggs; if (aggregations == null) { @@ -444,6 +478,9 @@ public void onPartialReduce(List<SearchShard> shards, TotalHits totalHits, Inter public void onFinalReduce(List<SearchShard> shards, TotalHits totalHits, InternalAggregations aggregations, int reducePhase) { // best effort to cancel expired tasks checkCancellation(); + if (delegate != null) { + delegate.onFinalReduce(shards, totalHits, aggregations, reducePhase); + } searchResponse.get().updatePartialResponse(shards.size(), totalHits, () -> aggregations, reducePhase); } diff --git a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchResponseTests.java b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchResponseTests.java index af2fd3022f769..aff63cd32976a 100644 --- a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchResponseTests.java +++ b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchResponseTests.java @@ -465,6 +465,7 @@ public void testToXContentWithCCSSearchResponseAfterCompletion() throws IOExcept SearchResponse.Cluster updated = new SearchResponse.Cluster( localCluster.getClusterAlias(), localCluster.getIndexExpression(), + false, SearchResponse.Cluster.Status.SUCCESSFUL, 10, 10, @@ -482,6 +483,7 @@ public void testToXContentWithCCSSearchResponseAfterCompletion() throws IOExcept updated = new SearchResponse.Cluster( cluster0.getClusterAlias(), cluster0.getIndexExpression(), + false, SearchResponse.Cluster.Status.SUCCESSFUL, 8, 8, @@ -507,6 +509,7 @@ public void testToXContentWithCCSSearchResponseAfterCompletion() throws IOExcept updated = new SearchResponse.Cluster( cluster1.getClusterAlias(), cluster1.getIndexExpression(), + false, SearchResponse.Cluster.Status.SKIPPED, 2, 0, @@ -524,6 +527,7 @@ public void testToXContentWithCCSSearchResponseAfterCompletion() throws IOExcept updated = new
SearchResponse.Cluster( cluster2.getClusterAlias(), cluster2.getIndexExpression(), + false, SearchResponse.Cluster.Status.PARTIAL, 8, 8, @@ -764,7 +768,7 @@ static SearchResponse.Clusters createCCSClusterObjects(int totalClusters, int re remoteClusterIndices.put("cluster_" + i, new OriginalIndices(new String[] { "foo", "bar*" }, IndicesOptions.lenientExpand())); } - return new SearchResponse.Clusters(localIndices, remoteClusterIndices, ccsMinimizeRoundtrips); + return new SearchResponse.Clusters(localIndices, remoteClusterIndices, ccsMinimizeRoundtrips, alias -> false); } static SearchResponse.Clusters createCCSClusterObjects( @@ -794,6 +798,7 @@ static SearchResponse.Clusters createCCSClusterObjects( updated = new SearchResponse.Cluster( localAlias, localRef.get().getIndexExpression(), + false, SearchResponse.Cluster.Status.SUCCESSFUL, 5, 5, @@ -808,6 +813,7 @@ static SearchResponse.Clusters createCCSClusterObjects( updated = new SearchResponse.Cluster( localAlias, localRef.get().getIndexExpression(), + false, SearchResponse.Cluster.Status.SKIPPED, 5, 0, @@ -822,6 +828,7 @@ static SearchResponse.Clusters createCCSClusterObjects( updated = new SearchResponse.Cluster( localAlias, localRef.get().getIndexExpression(), + false, SearchResponse.Cluster.Status.PARTIAL, 5, 2, @@ -848,6 +855,7 @@ static SearchResponse.Clusters createCCSClusterObjects( updated = new SearchResponse.Cluster( clusterAlias, clusterRef.get().getIndexExpression(), + false, SearchResponse.Cluster.Status.SUCCESSFUL, 5, 5, @@ -862,6 +870,7 @@ static SearchResponse.Clusters createCCSClusterObjects( updated = new SearchResponse.Cluster( clusterAlias, clusterRef.get().getIndexExpression(), + false, SearchResponse.Cluster.Status.SKIPPED, 5, 0, @@ -876,6 +885,7 @@ static SearchResponse.Clusters createCCSClusterObjects( updated = new SearchResponse.Cluster( clusterAlias, clusterRef.get().getIndexExpression(), + false, SearchResponse.Cluster.Status.PARTIAL, 5, 2, diff --git a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchTaskTests.java b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchTaskTests.java index 2ef338e5f8982..678a6fb736720 100644 --- a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchTaskTests.java +++ b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/AsyncSearchTaskTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.action.search.SearchShard; import org.elasticsearch.action.search.ShardSearchFailure; +import org.elasticsearch.action.search.TransportSearchAction; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; import org.elasticsearch.core.TimeValue; @@ -42,6 +43,7 @@ import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; @@ -56,7 +58,7 @@ public class AsyncSearchTaskTests extends ESTestCase { public void beforeTest() { threadPool = new TestThreadPool(getTestName()) { @Override - public ScheduledCancellable schedule(Runnable command, TimeValue delay, String executor) { + public ScheduledCancellable schedule(Runnable command, TimeValue delay, Executor executor) { if (throwOnSchedule) { throw new RuntimeException(); } @@ -70,6 +72,10 @@ public void afterTest() { threadPool.shutdownNow(); } + static 
TransportSearchAction.SearchTimeProvider createTimeProvider() { + return new TransportSearchAction.SearchTimeProvider(System.currentTimeMillis(), System.nanoTime(), System::nanoTime); + } + private AsyncSearchTask createAsyncSearchTask() { return new AsyncSearchTask( 0L, @@ -157,7 +163,8 @@ public void onFailure(Exception e) { thread.start(); } assertFalse(latch.await(numThreads * 2, TimeUnit.MILLISECONDS)); - task.getSearchProgressActionListener().onListShards(shards, skippedShards, SearchResponse.Clusters.EMPTY, false); + task.getSearchProgressActionListener() + .onListShards(shards, skippedShards, SearchResponse.Clusters.EMPTY, false, createTimeProvider()); latch.await(); } @@ -190,7 +197,7 @@ public void onFailure(Exception e) { public void testWithFailureAndGetResponseFailureDuringReduction() throws InterruptedException { AsyncSearchTask task = createAsyncSearchTask(); task.getSearchProgressActionListener() - .onListShards(Collections.emptyList(), Collections.emptyList(), SearchResponse.Clusters.EMPTY, false); + .onListShards(Collections.emptyList(), Collections.emptyList(), SearchResponse.Clusters.EMPTY, false, createTimeProvider()); InternalAggregations aggs = InternalAggregations.from( Collections.singletonList( new StringTerms( @@ -255,7 +262,8 @@ public void testWaitForCompletion() throws InterruptedException { skippedShards.add(new SearchShard(null, new ShardId("0", "0", 1))); } int totalShards = numShards + numSkippedShards; - task.getSearchProgressActionListener().onListShards(shards, skippedShards, SearchResponse.Clusters.EMPTY, false); + task.getSearchProgressActionListener() + .onListShards(shards, skippedShards, SearchResponse.Clusters.EMPTY, false, createTimeProvider()); for (int i = 0; i < numShards; i++) { task.getSearchProgressActionListener() .onPartialReduce(shards.subList(i, i + 1), new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), null, 0); @@ -281,7 +289,8 @@ public void testWithFetchFailures() throws InterruptedException { skippedShards.add(new SearchShard(null, new ShardId("0", "0", 1))); } int totalShards = numShards + numSkippedShards; - task.getSearchProgressActionListener().onListShards(shards, skippedShards, SearchResponse.Clusters.EMPTY, false); + task.getSearchProgressActionListener() + .onListShards(shards, skippedShards, SearchResponse.Clusters.EMPTY, false, createTimeProvider()); for (int i = 0; i < numShards; i++) { task.getSearchProgressActionListener() .onPartialReduce(shards.subList(i, i + 1), new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), null, 0); @@ -317,7 +326,8 @@ public void testFatalFailureDuringFetch() throws InterruptedException { skippedShards.add(new SearchShard(null, new ShardId("0", "0", 1))); } int totalShards = numShards + numSkippedShards; - task.getSearchProgressActionListener().onListShards(shards, skippedShards, SearchResponse.Clusters.EMPTY, false); + task.getSearchProgressActionListener() + .onListShards(shards, skippedShards, SearchResponse.Clusters.EMPTY, false, createTimeProvider()); for (int i = 0; i < numShards; i++) { task.getSearchProgressActionListener() .onPartialReduce(shards.subList(0, i + 1), new TotalHits(0, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO), null, 0); @@ -349,7 +359,8 @@ public void testFatalFailureWithNoCause() throws InterruptedException { skippedShards.add(new SearchShard(null, new ShardId("0", "0", 1))); } int totalShards = numShards + numSkippedShards; - task.getSearchProgressActionListener().onListShards(shards, skippedShards, SearchResponse.Clusters.EMPTY, false); + 
task.getSearchProgressActionListener() + .onListShards(shards, skippedShards, SearchResponse.Clusters.EMPTY, false, createTimeProvider()); listener.onFailure(new SearchPhaseExecutionException("fetch", "boum", ShardSearchFailure.EMPTY_ARRAY)); assertCompletionListeners(task, totalShards, 0, numSkippedShards, 0, true, true); @@ -375,7 +386,7 @@ public void onFailure(Exception e) { } }, TimeValue.timeValueMillis(500L)); asyncSearchTask.getSearchProgressActionListener() - .onListShards(Collections.emptyList(), Collections.emptyList(), SearchResponse.Clusters.EMPTY, false); + .onListShards(Collections.emptyList(), Collections.emptyList(), SearchResponse.Clusters.EMPTY, false, createTimeProvider()); assertTrue(latch.await(1000, TimeUnit.SECONDS)); assertThat(failure.get(), instanceOf(RuntimeException.class)); } @@ -384,7 +395,7 @@ public void testAddCompletionListenerScheduleErrorInitListenerExecutedImmediatel throwOnSchedule = true; AsyncSearchTask asyncSearchTask = createAsyncSearchTask(); asyncSearchTask.getSearchProgressActionListener() - .onListShards(Collections.emptyList(), Collections.emptyList(), SearchResponse.Clusters.EMPTY, false); + .onListShards(Collections.emptyList(), Collections.emptyList(), SearchResponse.Clusters.EMPTY, false, createTimeProvider()); CountDownLatch latch = new CountDownLatch(1); AtomicReference failure = new AtomicReference<>(); // onListShards has already been executed, then addCompletionListener is executed immediately diff --git a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/CancellingAggregationBuilder.java b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/CancellingAggregationBuilder.java index 773c70c3b64ec..90358014e1ee4 100644 --- a/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/CancellingAggregationBuilder.java +++ b/x-pack/plugin/async-search/src/test/java/org/elasticsearch/xpack/search/CancellingAggregationBuilder.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.search; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.index.query.QueryBuilders; @@ -106,6 +107,6 @@ public BucketCardinality bucketCardinality() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_7_0; + return TransportVersions.V_7_7_0; } } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/AutoscalingMetadata.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/AutoscalingMetadata.java index 33d9763acc2ba..5c885ad718d8c 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/AutoscalingMetadata.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/AutoscalingMetadata.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.autoscaling; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.NamedDiff; @@ -113,7 +114,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_8_0; + return TransportVersions.V_7_8_0; } @Override @@ -172,7 +173,7 @@ static Diff readFrom(final StreamInput in) throws IOE @Override public TransportVersion 
getMinimalSupportedVersion() { - return TransportVersion.V_7_8_0; + return TransportVersions.V_7_8_0; } } } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityAction.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityAction.java index 029729f9cad12..7944fb6738b0f 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityAction.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/GetAutoscalingCapacityAction.java @@ -90,7 +90,7 @@ public Response(final StreamInput in) throws IOException { @Override public void writeTo(final StreamOutput out) throws IOException { - out.writeMap(results, StreamOutput::writeString, (o, decision) -> decision.writeTo(o)); + out.writeMap(results, StreamOutput::writeWriteable); } public SortedMap results() { diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/PutAutoscalingPolicyAction.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/PutAutoscalingPolicyAction.java index 3a8f09f021d1b..fe0cda75503e0 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/PutAutoscalingPolicyAction.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/PutAutoscalingPolicyAction.java @@ -87,7 +87,7 @@ public Request(final StreamInput in) throws IOException { super(in); this.name = in.readString(); if (in.readBoolean()) { - this.roles = in.readSet(StreamInput::readString).stream().collect(Sets.toUnmodifiableSortedSet()); + this.roles = in.readCollectionAsSet(StreamInput::readString).stream().collect(Sets.toUnmodifiableSortedSet()); } else { this.roles = null; } @@ -109,7 +109,7 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeString(name); if (roles != null) { out.writeBoolean(true); - out.writeCollection(roles, StreamOutput::writeString); + out.writeStringCollection(roles); } else { out.writeBoolean(false); } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCapacity.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCapacity.java index 5037cd44f7670..f5197d4efe346 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCapacity.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCapacity.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.autoscaling.capacity; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -46,7 +46,7 @@ public AutoscalingResources(ByteSizeValue storage, ByteSizeValue memory, Process public AutoscalingResources(StreamInput in) throws IOException { this.storage = in.readOptionalWriteable(ByteSizeValue::readFrom); this.memory = in.readOptionalWriteable(ByteSizeValue::readFrom); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { this.processors = in.readOptionalWriteable(Processors::readFrom); } else { this.processors = null; @@ -88,7 +88,7 @@ public 
XContentBuilder toXContent(XContentBuilder builder, Params params) throws public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(storage); out.writeOptionalWriteable(memory); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { out.writeOptionalWriteable(processors); } } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingDeciderResults.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingDeciderResults.java index b45a38614a1b7..d076d2a3ec117 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingDeciderResults.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingDeciderResults.java @@ -61,7 +61,7 @@ public AutoscalingDeciderResults( public AutoscalingDeciderResults(final StreamInput in) throws IOException { this.currentCapacity = new AutoscalingCapacity(in); - this.currentNodes = in.readSet(DiscoveryNode::new) + this.currentNodes = in.readCollectionAsSet(DiscoveryNode::new) .stream() .collect(Collectors.toCollection(() -> new TreeSet<>(DISCOVERY_NODE_COMPARATOR))); this.results = new TreeMap<>(in.readMap(AutoscalingDeciderResult::new)); @@ -71,7 +71,7 @@ public AutoscalingDeciderResults(final StreamInput in) throws IOException { public void writeTo(final StreamOutput out) throws IOException { currentCapacity.writeTo(out); out.writeCollection(currentNodes); - out.writeMap(results, StreamOutput::writeString, (output, result) -> result.writeTo(output)); + out.writeMap(results, StreamOutput::writeWriteable); } @Override diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/FixedAutoscalingDeciderService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/FixedAutoscalingDeciderService.java index 81d2122d71411..9c563eef9065e 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/FixedAutoscalingDeciderService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/FixedAutoscalingDeciderService.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.autoscaling.capacity; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; @@ -119,7 +119,7 @@ public FixedReason(StreamInput in) throws IOException { this.storage = in.readOptionalWriteable(ByteSizeValue::readFrom); this.memory = in.readOptionalWriteable(ByteSizeValue::readFrom); this.nodes = in.readInt(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { this.processors = in.readOptionalWriteable(Processors::readFrom); } else { this.processors = null; @@ -149,7 +149,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(storage); out.writeOptionalWriteable(memory); out.writeInt(nodes); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { out.writeOptionalWriteable(processors); } } diff --git 
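All of the autoscaling hunks above share one mechanical change: released wire-version constants now live on the TransportVersions holder class rather than on TransportVersion itself. A minimal sketch of the resulting pattern for a version-gated field (the class and field names here are illustrative, not from this PR):

    import org.elasticsearch.TransportVersions;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    import java.io.IOException;

    // Hypothetical Writeable showing the constant migration: named versions
    // such as V_8_4_0 are now looked up on TransportVersions.
    public class ExampleReason implements Writeable {
        private final String reason;
        private final Integer nodes; // only on the wire since 8.4.0

        public ExampleReason(String reason, Integer nodes) {
            this.reason = reason;
            this.nodes = nodes;
        }

        public ExampleReason(StreamInput in) throws IOException {
            this.reason = in.readString();
            if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
                this.nodes = in.readOptionalVInt();
            } else {
                this.nodes = null; // field absent on older streams
            }
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(reason);
            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
                out.writeOptionalVInt(nodes);
            }
        }
    }

The TransportVersion type and its onOrAfter/before comparisons are unchanged; only the named constants moved.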
a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderService.java index 9b52162a14ca0..80c626e8b24d7 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/existence/FrozenExistenceDeciderService.java @@ -85,7 +85,7 @@ public FrozenExistenceReason(List indices) { } public FrozenExistenceReason(StreamInput in) throws IOException { - this.indices = in.readStringList(); + this.indices = in.readStringCollectionAsList(); } @Override diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/policy/AutoscalingPolicy.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/policy/AutoscalingPolicy.java index 64a50687acba1..209fe3b3fd355 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/policy/AutoscalingPolicy.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/policy/AutoscalingPolicy.java @@ -87,7 +87,7 @@ public AutoscalingPolicy(final String name, SortedSet roles, final Sorte public AutoscalingPolicy(final StreamInput in) throws IOException { this.name = in.readString(); - this.roles = in.readSet(StreamInput::readString).stream().collect(Sets.toUnmodifiableSortedSet()); + this.roles = in.readCollectionAsSet(StreamInput::readString).stream().collect(Sets.toUnmodifiableSortedSet()); int deciderCount = in.readInt(); SortedMap decidersMap = new TreeMap<>(); for (int i = 0; i < deciderCount; ++i) { @@ -99,7 +99,7 @@ public AutoscalingPolicy(final StreamInput in) throws IOException { @Override public void writeTo(final StreamOutput out) throws IOException { out.writeString(name); - out.writeCollection(roles, StreamOutput::writeString); + out.writeStringCollection(roles); out.writeInt(deciders.size()); for (Map.Entry entry : deciders.entrySet()) { out.writeString(entry.getKey()); diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/NodeDecisions.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/NodeDecisions.java index 709e19ccbfbcd..c472eda4e4722 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/NodeDecisions.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/NodeDecisions.java @@ -29,7 +29,7 @@ class NodeDecisions implements ToXContentObject, Writeable { } NodeDecisions(StreamInput in) throws IOException { - canAllocateDecisions = in.readList(NodeDecision::new); + canAllocateDecisions = in.readCollectionAsList(NodeDecision::new); canRemainDecision = in.readOptionalWriteable(NodeDecision::new); } @@ -44,7 +44,7 @@ NodeDecision canRemainDecision() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(canAllocateDecisions); + out.writeCollection(canAllocateDecisions); out.writeOptionalWriteable(canRemainDecision); } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java index f807583510005..c94ffd5b20798 100644 --- 
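The NodeDecisions and FrozenExistenceDeciderService hunks are instances of the other recurring rename in this PR: readList/readSet/readStringList become readCollectionAsList/readCollectionAsSet/readStringCollectionAsList, and writeList collapses into writeCollection, with writeStringCollection for the common string case. A before/after sketch on a hypothetical Writeable:

    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;
    import org.elasticsearch.common.io.stream.Writeable;

    import java.io.IOException;
    import java.util.List;
    import java.util.Set;

    // Illustrative only: shows the renamed collection helpers used throughout this PR.
    record ExampleDecisions(List<String> indices, Set<String> roles) implements Writeable {

        static ExampleDecisions readFrom(StreamInput in) throws IOException {
            return new ExampleDecisions(
                in.readStringCollectionAsList(),                // was: in.readStringList()
                in.readCollectionAsSet(StreamInput::readString) // was: in.readSet(StreamInput::readString)
            );
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeStringCollection(indices); // was: out.writeCollection(indices, StreamOutput::writeString)
            out.writeStringCollection(roles);
        }
    }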
a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.autoscaling.storage; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterInfo; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.DiskUsage; @@ -16,6 +17,7 @@ import org.elasticsearch.cluster.metadata.DesiredNodes; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeFilters; import org.elasticsearch.cluster.node.DiscoveryNodeRole; @@ -581,7 +583,12 @@ private IndexMetadata indexMetadata(ShardRouting shard, RoutingAllocation alloca return allocation.metadata().getIndexSafe(shard.index()); } - private Optional highestPreferenceTier(List preferredTiers, DiscoveryNodes unused, DesiredNodes desiredNodes) { + private Optional highestPreferenceTier( + List preferredTiers, + DiscoveryNodes unused, + DesiredNodes desiredNodes, + NodesShutdownMetadata shutdownMetadata + ) { return Optional.of(highestPreferenceTier(preferredTiers)); } @@ -964,8 +971,8 @@ public Iterator toXContentChunked(ToXContent.Params params public static class ReactiveReason implements AutoscalingDeciderResult.Reason { static final int MAX_AMOUNT_OF_SHARDS = 512; - private static final TransportVersion SHARD_IDS_OUTPUT_VERSION = TransportVersion.V_8_4_0; - private static final TransportVersion UNASSIGNED_NODE_DECISIONS_OUTPUT_VERSION = TransportVersion.V_8_500_010; + private static final TransportVersion SHARD_IDS_OUTPUT_VERSION = TransportVersions.V_8_4_0; + private static final TransportVersion UNASSIGNED_NODE_DECISIONS_OUTPUT_VERSION = TransportVersions.V_8_500_020; private final String reason; private final long unassigned; @@ -1002,8 +1009,8 @@ public ReactiveReason(StreamInput in) throws IOException { this.unassigned = in.readLong(); this.assigned = in.readLong(); if (in.getTransportVersion().onOrAfter(SHARD_IDS_OUTPUT_VERSION)) { - unassignedShardIds = Collections.unmodifiableSortedSet(new TreeSet<>(in.readSet(ShardId::new))); - assignedShardIds = Collections.unmodifiableSortedSet(new TreeSet<>(in.readSet(ShardId::new))); + unassignedShardIds = Collections.unmodifiableSortedSet(new TreeSet<>(in.readCollectionAsSet(ShardId::new))); + assignedShardIds = Collections.unmodifiableSortedSet(new TreeSet<>(in.readCollectionAsSet(ShardId::new))); } else { unassignedShardIds = Collections.emptySortedSet(); assignedShardIds = Collections.emptySortedSet(); diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/BlobCacheBufferedIndexInput.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/BlobCacheBufferedIndexInput.java index f0c08bce0d28e..8eb7e32bfdd3a 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/BlobCacheBufferedIndexInput.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/common/BlobCacheBufferedIndexInput.java @@ -20,6 +20,8 @@ /** * Copy of {@link org.apache.lucene.store.BufferedIndexInput} that contains optimizations that haven't made it to the Lucene version used * by Elasticsearch 
yet or that are only applicable to Elasticsearch. + *
+ * Deviates from Lucene's implementation slightly to fix a bug - see [NOTE: Missing Seek] below, and #98970 for more details. */ public abstract class BlobCacheBufferedIndexInput extends IndexInput implements RandomAccessInput { @@ -106,13 +108,14 @@ public final void readBytes(byte[] b, int offset, int len, boolean useBuffer) th buffer.get(b, offset, len); } } else { - // The amount left to read is larger than the buffer - // or we've been asked to not use our buffer - - // there's no performance reason not to read it all - // at once. Note that unlike the previous code of - // this function, there is no need to do a seek - // here, because there's no need to reread what we - // had in the buffer. + // The amount left to read is larger than the buffer or we've been asked to not use our buffer - there's no performance + // reason not to read it all at once. + if (buffer == EMPTY_BYTEBUFFER) { + // fresh clone, must seek + // [NOTE: Missing Seek] This deviates from Lucene's BufferedIndexInput implementation - see #98970 + seekInternal(bufferStart); + } // else there's no need to do a seek here because we are already positioned correctly + long after = bufferStart + buffer.position() + len; if (after > length()) throw new EOFException("read past EOF: " + this); readInternal(ByteBuffer.wrap(b, offset, len)); diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java index b0bd3d955322e..d7cbddf490df5 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBlobCacheService.java @@ -55,7 +55,7 @@ import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.LongAdder; -import java.util.function.LongConsumer; +import java.util.function.IntConsumer; import java.util.function.Predicate; import java.util.stream.Collectors; @@ -263,7 +263,7 @@ public void validate(ByteSizeValue value, Map, Object> settings, bool private final SharedBytes sharedBytes; private final long cacheSize; - private final long regionSize; + private final int regionSize; private final ByteSizeValue rangeSize; private final ByteSizeValue recoveryRangeSize; @@ -308,7 +308,7 @@ public SharedBlobCacheService( throw new IllegalStateException("unable to probe size of filesystem [" + environment.nodeDataPaths()[0] + "]"); } this.cacheSize = calculateCacheSize(settings, totalFsSize); - final long regionSize = SHARED_CACHE_REGION_SIZE_SETTING.get(settings).getBytes(); + final int regionSize = Math.toIntExact(SHARED_CACHE_REGION_SIZE_SETTING.get(settings).getBytes()); this.numRegions = Math.toIntExact(cacheSize / regionSize); keyMapping = new ConcurrentHashMap<>(); if (Assertions.ENABLED) { @@ -336,7 +336,7 @@ public SharedBlobCacheService( for (int i = 0; i < numRegions; i++) { freeRegions.add(sharedBytes.getFileChannel(i)); } - decayTask = new CacheDecayTask(threadPool, SHARED_CACHE_DECAY_INTERVAL_SETTING.get(settings)); + decayTask = new CacheDecayTask(threadPool, threadPool.generic(), SHARED_CACHE_DECAY_INTERVAL_SETTING.get(settings)); decayTask.rescheduleIfNecessary(); this.rangeSize = SHARED_CACHE_RANGE_SIZE_SETTING.get(settings); this.recoveryRangeSize = SHARED_CACHE_RECOVERY_RANGE_SIZE_SETTING.get(settings); @@ -360,16 +360,16 @@ private int getRegion(long position) { 
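For context on the [NOTE: Missing Seek] fix above: a fresh clone of a BufferedIndexInput copies the parent's bufferStart but starts with an empty buffer, so nothing has ever positioned its underlying channel before the first read. A schematic reproduction of the hazard using plain Lucene APIs (the directory, file name, and offsets are placeholders, and the failure mode described in the comments is as reported in #98970):

    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.IOContext;
    import org.apache.lucene.store.IndexInput;

    import java.io.IOException;

    class CloneSeekSketch {
        // A fresh clone shares bufferStart with its parent but has an empty
        // buffer, so the large-read fast path must seekInternal(bufferStart)
        // before calling readInternal.
        static byte[] readViaClone(Directory dir, String name, int offset, int len) throws IOException {
            try (IndexInput main = dir.openInput(name, IOContext.DEFAULT)) {
                main.seek(offset);               // position the parent
                IndexInput clone = main.clone(); // same file pointer, empty buffer
                byte[] page = new byte[len];     // len larger than the internal buffer
                clone.readBytes(page, 0, len);   // takes the "read it all at once" branch
                return page;                     // without the fix this could read from offset 0
            }
        }
    }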
return (int) (position / regionSize); } - private long getRegionRelativePosition(long position) { - return position % regionSize; + private int getRegionRelativePosition(long position) { + return (int) (position % regionSize); } private long getRegionStart(int region) { - return region * regionSize; + return (long) region * regionSize; } private long getRegionEnd(int region) { - return (region + 1) * regionSize; + return (long) (region + 1) * regionSize; } private int getEndingRegion(long position) { @@ -393,12 +393,12 @@ private ByteRange mapSubRangeToRegion(ByteRange range, int region) { ); } - private long getRegionSize(long fileLength, int region) { + private int getRegionSize(long fileLength, int region) { assert fileLength > 0; final int maxRegion = getEndingRegion(fileLength); assert region >= 0 && region <= maxRegion : region + " - " + maxRegion; - final long effectiveRegionSize; - if (region == maxRegion && (region + 1) * regionSize != fileLength) { + final int effectiveRegionSize; + if (region == maxRegion && (long) (region + 1) * regionSize != fileLength) { assert getRegionRelativePosition(fileLength) != 0L; effectiveRegionSize = getRegionRelativePosition(fileLength); } else { @@ -415,7 +415,7 @@ Entry get(KeyType cacheKey, long fileLength, int region) { // find an entry var entry = keyMapping.get(regionKey); if (entry == null) { - final long effectiveRegionSize = getRegionSize(fileLength, region); + final int effectiveRegionSize = getRegionSize(fileLength, region); entry = keyMapping.computeIfAbsent(regionKey, key -> new Entry<>(new CacheFileRegion(key, effectiveRegionSize), now)); } // io is volatile, double locking is fine, as long as we assign it last. @@ -449,33 +449,52 @@ Entry get(KeyType cacheKey, long fileLength, int region) { * @param cacheKey the key to fetch data for * @param length the length of the blob to fetch * @param writer a writer that handles writing of newly downloaded data to the shared cache + * @param listener listener that is called once all downloading has finished * - * @return {@code true} if there were enough free pages to start downloading + * @return {@code true} if there were enough free pages to start downloading the full entry */ - public boolean maybeFetchFullEntry(KeyType cacheKey, long length, RangeMissingHandler writer) { + public boolean maybeFetchFullEntry(KeyType cacheKey, long length, RangeMissingHandler writer, ActionListener listener) { int finalRegion = getEndingRegion(length); if (freeRegionCount() < finalRegion) { // Not enough room to download a full file without evicting existing data, so abort + listener.onResponse(null); return false; } long regionLength = regionSize; - for (int region = 0; region <= finalRegion; region++) { - var entry = get(cacheKey, length, region); - if (region == finalRegion) { - regionLength = length - getRegionStart(region); - } - ByteRange rangeToWrite = ByteRange.of(0, regionLength); - if (rangeToWrite.length() == 0) { - return true; + try (RefCountingListener refCountingListener = new RefCountingListener(listener)) { + for (int region = 0; region <= finalRegion; region++) { + if (region == finalRegion) { + regionLength = length - getRegionStart(region); + } + ByteRange rangeToWrite = ByteRange.of(0, regionLength); + if (rangeToWrite.isEmpty()) { + return true; + } + final ActionListener regionListener = refCountingListener.acquire(ignored -> {}); + final SharedBlobCacheService.Entry entry; + try { + entry = get(cacheKey, length, region); + } catch (AlreadyClosedException e) { + // failed to grab a 
cache page because some other operation concurrently acquired some + regionListener.onResponse(0); + return false; + } + // set read range == write range so the listener completes only once all the bytes have been downloaded + entry.chunk.populateAndRead( + rangeToWrite, + rangeToWrite, + (channel, pos, relativePos, len) -> Math.toIntExact(len), + writer, + bulkIOExecutor, + regionListener.delegateResponse((l, e) -> { + if (e instanceof AlreadyClosedException) { + l.onResponse(0); + } else { + l.onFailure(e); + } + }) + ); } - entry.chunk.populateAndRead( - rangeToWrite, - ByteRange.EMPTY, - (channel, pos, relativePos, len) -> 0, - writer, - bulkIOExecutor, - ActionListener.noop() - ); } return true; } @@ -726,8 +745,8 @@ public void close() { class CacheDecayTask extends AbstractAsyncTask { - CacheDecayTask(ThreadPool threadPool, TimeValue interval) { - super(logger, Objects.requireNonNull(threadPool), Objects.requireNonNull(interval), true); + CacheDecayTask(ThreadPool threadPool, Executor executor, TimeValue interval) { + super(logger, Objects.requireNonNull(threadPool), executor, Objects.requireNonNull(interval), true); } @Override @@ -740,11 +759,6 @@ public void runInternal() { computeDecay(); } - @Override - protected String getThreadPool() { - return ThreadPool.Names.GENERIC; - } - @Override public String toString() { return "shared_cache_decay_task"; @@ -820,19 +834,15 @@ class CacheFileRegion extends EvictableRefCounted { final SparseFileTracker tracker; volatile SharedBytes.IO io = null; - CacheFileRegion(RegionKey regionKey, long regionSize) { + CacheFileRegion(RegionKey regionKey, int regionSize) { this.regionKey = regionKey; - assert regionSize > 0L; + assert regionSize > 0; tracker = new SparseFileTracker("file", regionSize); } public long physicalStartOffset() { var ioRef = io; - return ioRef == null ? -1L : ioRef.pageStart(); - } - - public long physicalEndOffset() { - return physicalStartOffset() + sharedBytes.regionSize; + return ioRef == null ? 
-1L : (long) regionKey.region * regionSize; } // tries to evict this chunk if noone is holding onto its resources anymore @@ -881,11 +891,9 @@ private static void throwAlreadyEvicted() { } boolean tryRead(ByteBuffer buf, long offset) throws IOException { - int startingPos = buf.position(); - var ioRef = io; - ioRef.read(buf, ioRef.pageStart() + getRegionRelativePosition(offset)); + int readBytes = io.read(buf, getRegionRelativePosition(offset)); if (isEvicted()) { - buf.position(startingPos); + buf.position(buf.position() - readBytes); return false; } return true; @@ -909,14 +917,9 @@ void populateAndRead( rangeToRead, ActionListener.runBefore(listener, resource::close).delegateFailureAndWrap((l, success) -> { var ioRef = io; - final long physicalStartOffset = ioRef.pageStart(); assert regionOwners.get(ioRef) == this; - final int read = reader.onRangeAvailable( - ioRef, - physicalStartOffset + rangeToRead.start(), - rangeToRead.start(), - rangeToRead.length() - ); + final int start = Math.toIntExact(rangeToRead.start()); + final int read = reader.onRangeAvailable(ioRef, start, start, Math.toIntExact(rangeToRead.length())); assert read == rangeToRead.length() : "partial read [" + read @@ -946,14 +949,14 @@ private void fillGaps(Executor executor, RangeMissingHandler writer, List gap.onProgress(start + progress) ); writeCount.increment(); @@ -1065,8 +1068,8 @@ private int readSingleRegion( fileRegion.populateAndRead( mapSubRangeToRegion(rangeToWrite, region), mapSubRangeToRegion(rangeToRead, region), - readerWithOffset(reader, fileRegion, rangeToRead.start() - regionStart), - writerWithOffset(writer, fileRegion, rangeToWrite.start() - regionStart), + readerWithOffset(reader, fileRegion, Math.toIntExact(rangeToRead.start() - regionStart)), + writerWithOffset(writer, fileRegion, Math.toIntExact(rangeToWrite.start() - regionStart)), ioExecutor, readFuture ); @@ -1095,8 +1098,8 @@ private int readMultiRegions( fileRegion.populateAndRead( mapSubRangeToRegion(rangeToWrite, region), subRangeToRead, - readerWithOffset(reader, fileRegion, rangeToRead.start() - regionStart), - writerWithOffset(writer, fileRegion, rangeToWrite.start() - regionStart), + readerWithOffset(reader, fileRegion, Math.toIntExact(rangeToRead.start() - regionStart)), + writerWithOffset(writer, fileRegion, Math.toIntExact(rangeToWrite.start() - regionStart)), ioExecutor, listeners.acquire(i -> bytesRead.updateAndGet(j -> Math.addExact(i, j))) ); @@ -1106,7 +1109,7 @@ private int readMultiRegions( return bytesRead.get(); } - private RangeMissingHandler writerWithOffset(RangeMissingHandler writer, CacheFileRegion fileRegion, long writeOffset) { + private RangeMissingHandler writerWithOffset(RangeMissingHandler writer, CacheFileRegion fileRegion, int writeOffset) { final RangeMissingHandler adjustedWriter; if (writeOffset == 0) { // no need to allocate a new capturing lambda if the offset isn't adjusted @@ -1129,7 +1132,7 @@ private RangeMissingHandler writerWithOffset(RangeMissingHandler writer, CacheFi return adjustedWriter; } - private RangeAvailableHandler readerWithOffset(RangeAvailableHandler reader, CacheFileRegion fileRegion, long readOffset) { + private RangeAvailableHandler readerWithOffset(RangeAvailableHandler reader, CacheFileRegion fileRegion, int readOffset) { final RangeAvailableHandler adjustedReader = (channel, channelPos, relativePos, len) -> reader.onRangeAvailable( channel, channelPos, @@ -1145,9 +1148,9 @@ private RangeAvailableHandler readerWithOffset(RangeAvailableHandler reader, Cac return adjustedReader; } 
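regionSize is now an int, which is why getRegionStart, getRegionEnd, and the mmap arithmetic all widen to long before multiplying: evaluated in 32-bit arithmetic, region * regionSize silently overflows once the product passes Integer.MAX_VALUE. A standalone illustration of the pitfall (the 16 MiB region size is made up for the example):

    // Why the PR casts before multiplying: with a 16 MiB int regionSize,
    // region 128 already pushes the product past Integer.MAX_VALUE when
    // the multiplication happens in int arithmetic.
    public class RegionMathSketch {
        private static final int REGION_SIZE = 16 * 1024 * 1024; // 16 MiB

        static long regionStartWrong(int region) {
            return region * REGION_SIZE; // overflows in int, then widens: wrong for region >= 128
        }

        static long regionStartRight(int region) {
            return (long) region * REGION_SIZE; // widen first, as the PR does
        }

        public static void main(String[] args) {
            System.out.println(regionStartWrong(200)); // -939524096 (garbage)
            System.out.println(regionStartRight(200)); // 3355443200
        }
    }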
- private boolean assertValidRegionAndLength(CacheFileRegion fileRegion, long channelPos, long len) { + private boolean assertValidRegionAndLength(CacheFileRegion fileRegion, int channelPos, int len) { assert regionOwners.get(fileRegion.io) == fileRegion; - assert channelPos >= fileRegion.physicalStartOffset() && channelPos + len <= fileRegion.physicalEndOffset(); + assert channelPos >= 0 && channelPos + len <= regionSize; return true; } @@ -1165,12 +1168,12 @@ public CacheFile getCacheFile(KeyType cacheKey, long length) { public interface RangeAvailableHandler { // caller that wants to read from x should instead do a positional read from x + relativePos // caller should also only read up to length, further bytes will be offered by another call to this method - int onRangeAvailable(SharedBytes.IO channel, long channelPos, long relativePos, long length) throws IOException; + int onRangeAvailable(SharedBytes.IO channel, int channelPos, int relativePos, int length) throws IOException; } @FunctionalInterface public interface RangeMissingHandler { - void fillCacheRange(SharedBytes.IO channel, long channelPos, long relativePos, long length, LongConsumer progressUpdater) + void fillCacheRange(SharedBytes.IO channel, int channelPos, int relativePos, int length, IntConsumer progressUpdater) throws IOException; } diff --git a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java index 03c423805558d..04347aaf6bff2 100644 --- a/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java +++ b/x-pack/plugin/blob-cache/src/main/java/org/elasticsearch/blobcache/shared/SharedBytes.java @@ -28,7 +28,6 @@ import java.nio.file.Path; import java.nio.file.StandardOpenOption; import java.util.function.IntConsumer; -import java.util.function.LongConsumer; public class SharedBytes extends AbstractRefCounted { @@ -58,7 +57,7 @@ public class SharedBytes extends AbstractRefCounted { private final IO[] ios; - final long regionSize; + final int regionSize; // TODO: for systems like Windows without true p-write/read support we should split this up into multiple channels since positional // operations in #IO are not contention-free there (https://bugs.java.com/bugdatabase/view_bug.do?bug_id=6265734) @@ -70,11 +69,11 @@ public class SharedBytes extends AbstractRefCounted { private final boolean mmap; - SharedBytes(int numRegions, long regionSize, NodeEnvironment environment, IntConsumer writeBytes, IntConsumer readBytes, boolean mmap) + SharedBytes(int numRegions, int regionSize, NodeEnvironment environment, IntConsumer writeBytes, IntConsumer readBytes, boolean mmap) throws IOException { this.numRegions = numRegions; this.regionSize = regionSize; - final long fileSize = numRegions * regionSize; + final long fileSize = (long) numRegions * regionSize; Path cacheFile = null; if (fileSize > 0) { cacheFile = findCacheSnapshotCacheFilePath(environment, fileSize); @@ -92,7 +91,7 @@ public class SharedBytes extends AbstractRefCounted { this.ios = new IO[numRegions]; if (mmap && fileSize > 0) { int regionsPerMmap = Math.toIntExact(MAX_BYTES_PER_MAP / regionSize); - int mapSize = Math.toIntExact(regionsPerMmap * regionSize); + int mapSize = regionsPerMmap * regionSize; int lastMapSize = Math.toIntExact(fileSize % mapSize); int mapCount = Math.toIntExact(fileSize / mapSize) + (lastMapSize == 0 ? 
0 : 1); MappedByteBuffer[] mmaps = new MappedByteBuffer[mapCount]; @@ -105,10 +104,7 @@ public class SharedBytes extends AbstractRefCounted { lastMapSize == 0 ? mapSize : lastMapSize ); for (int i = 0; i < numRegions; i++) { - ios[i] = new IO( - i, - mmaps[i / regionsPerMmap].slice(Math.toIntExact((i % regionsPerMmap) * regionSize), Math.toIntExact(regionSize)) - ); + ios[i] = new IO(i, mmaps[i / regionsPerMmap].slice((i % regionsPerMmap) * regionSize, regionSize)); } } else { for (int i = 0; i < numRegions; i++) { @@ -157,21 +153,20 @@ public static Path findCacheSnapshotCacheFilePath(NodeEnvironment environment, l public static void copyToCacheFileAligned( IO fc, InputStream input, - long fileChannelPos, - long relativePos, - long length, - LongConsumer progressUpdater, + int fileChannelPos, + int relativePos, + int length, + IntConsumer progressUpdater, ByteBuffer buf ) throws IOException { - long bytesCopied = 0L; + int bytesCopied = 0; long remaining = length; while (remaining > 0L) { final int bytesRead = BlobCacheUtils.readSafe(input, buf, relativePos, remaining); if (buf.hasRemaining()) { break; } - long bytesWritten = positionalWrite(fc, fileChannelPos + bytesCopied, buf); - bytesCopied += bytesWritten; + bytesCopied += positionalWrite(fc, fileChannelPos + bytesCopied, buf); progressUpdater.accept(bytesCopied); remaining -= bytesRead; } @@ -180,15 +175,14 @@ public static void copyToCacheFileAligned( final int remainder = buf.position() % PAGE_SIZE; final int adjustment = remainder == 0 ? 0 : PAGE_SIZE - remainder; buf.position(buf.position() + adjustment); - long bytesWritten = positionalWrite(fc, fileChannelPos + bytesCopied, buf); - bytesCopied += bytesWritten; - final long adjustedBytesCopied = bytesCopied - adjustment; // adjust to not break RangeFileTracker + bytesCopied += positionalWrite(fc, fileChannelPos + bytesCopied, buf); + final int adjustedBytesCopied = bytesCopied - adjustment; // adjust to not break RangeFileTracker assert adjustedBytesCopied == length : adjustedBytesCopied + " vs " + length; progressUpdater.accept(adjustedBytesCopied); } } - private static int positionalWrite(IO fc, long start, ByteBuffer byteBuffer) throws IOException { + private static int positionalWrite(IO fc, int start, ByteBuffer byteBuffer) throws IOException { byteBuffer.flip(); int written = fc.write(byteBuffer, start); assert byteBuffer.hasRemaining() == false; @@ -207,18 +201,13 @@ private static int positionalWrite(IO fc, long start, ByteBuffer byteBuffer) thr * @return number of bytes read * @throws IOException on failure */ - public static int readCacheFile( - final IO fc, - long channelPos, - long relativePos, - long length, - final ByteBufferReference byteBufferReference - ) throws IOException { + public static int readCacheFile(final IO fc, int channelPos, int relativePos, int length, final ByteBufferReference byteBufferReference) + throws IOException { if (length == 0L) { return 0; } final int bytesRead; - final ByteBuffer dup = byteBufferReference.tryAcquire(Math.toIntExact(relativePos), Math.toIntExact(length)); + final ByteBuffer dup = byteBufferReference.tryAcquire(relativePos, length); if (dup != null) { try { bytesRead = fc.read(dup, channelPos); @@ -230,7 +219,7 @@ public static int readCacheFile( } } else { // return fake response - return Math.toIntExact(length); + return length; } return bytesRead; } @@ -256,50 +245,49 @@ public final class IO { private final MappedByteBuffer mappedByteBuffer; private IO(final int sharedBytesPos, MappedByteBuffer mappedByteBuffer) { - 
long physicalOffset = sharedBytesPos * regionSize; - assert physicalOffset <= numRegions * regionSize; + long physicalOffset = (long) sharedBytesPos * regionSize; + assert physicalOffset <= (long) numRegions * regionSize; this.pageStart = physicalOffset; this.mappedByteBuffer = mappedByteBuffer; } - public long pageStart() { - return pageStart; - } - @SuppressForbidden(reason = "Use positional reads on purpose") - public int read(ByteBuffer dst, long position) throws IOException { - checkOffsets(position, dst.remaining()); + public int read(ByteBuffer dst, int position) throws IOException { + int remaining = dst.remaining(); + checkOffsets(position, remaining); final int bytesRead; if (mmap) { - bytesRead = dst.remaining(); + bytesRead = remaining; int startPosition = dst.position(); - dst.put(startPosition, mappedByteBuffer, Math.toIntExact(position - pageStart), bytesRead) - .position(startPosition + bytesRead); + dst.put(startPosition, mappedByteBuffer, position, bytesRead).position(startPosition + bytesRead); } else { - bytesRead = fileChannel.read(dst, position); + bytesRead = fileChannel.read(dst, pageStart + position); } readBytes.accept(bytesRead); return bytesRead; } @SuppressForbidden(reason = "Use positional writes on purpose") - public int write(ByteBuffer src, long position) throws IOException { + public int write(ByteBuffer src, int position) throws IOException { // check if writes are page size aligned for optimal performance assert position % PAGE_SIZE == 0; assert src.remaining() % PAGE_SIZE == 0; checkOffsets(position, src.remaining()); - int bytesWritten = fileChannel.write(src, position); + int bytesWritten = fileChannel.write(src, pageStart + position); writeBytes.accept(bytesWritten); return bytesWritten; } - private void checkOffsets(long position, long length) { - long pageEnd = pageStart + regionSize; - if (position < pageStart || position > pageEnd || position + length > pageEnd) { - assert false; - throw new IllegalArgumentException("bad access"); + private void checkOffsets(int position, int length) { + if (position < 0 || position + length > regionSize) { + offsetCheckFailed(); } } + + private static void offsetCheckFailed() { + assert false; + throw new IllegalArgumentException("bad access"); + } } } diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java index aa15adecead9b..181b8d10ec863 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBlobCacheServiceTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.blobcache.shared; import org.apache.lucene.store.AlreadyClosedException; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.blobcache.common.ByteRange; import org.elasticsearch.cluster.node.DiscoveryNodeRole; @@ -35,7 +36,6 @@ import java.util.Objects; import java.util.Set; import java.util.concurrent.BrokenBarrierException; -import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; @@ -369,16 +369,16 @@ public void execute(Runnable command) { { final var cacheKey = generateCacheKey(); assertEquals(5, cacheService.freeRegionCount()); - 
AtomicLong bytesRead = new AtomicLong(size(250)); - CountDownLatch latch = new CountDownLatch(1); - cacheService.maybeFetchFullEntry(cacheKey, size(250), (channel, channelPos, relativePos, length, progressUpdater) -> { + final long size = size(250); + AtomicLong bytesRead = new AtomicLong(size); + final PlainActionFuture future = PlainActionFuture.newFuture(); + cacheService.maybeFetchFullEntry(cacheKey, size, (channel, channelPos, relativePos, length, progressUpdater) -> { + bytesRead.addAndGet(-length); progressUpdater.accept(length); - if (bytesRead.addAndGet(-length) == 0) { - latch.countDown(); - } - }); + }, future); - assertTrue(latch.await(10, TimeUnit.SECONDS)); + future.get(10, TimeUnit.SECONDS); + assertEquals(0L, bytesRead.get()); assertEquals(2, cacheService.freeRegionCount()); assertEquals(3, bulkTaskCount.get()); } @@ -388,7 +388,7 @@ public void execute(Runnable command) { assertEquals(2, cacheService.freeRegionCount()); var configured = cacheService.maybeFetchFullEntry(cacheKey, size(500), (ch, chPos, relPos, len, update) -> { throw new AssertionError("Should never reach here"); - }); + }, ActionListener.noop()); assertFalse(configured); assertEquals(2, cacheService.freeRegionCount()); } @@ -397,6 +397,62 @@ public void execute(Runnable command) { threadPool.shutdown(); } + public void testFetchFullCacheEntryConcurrently() throws Exception { + Settings settings = Settings.builder() + .put(NODE_NAME_SETTING.getKey(), "node") + .put(SharedBlobCacheService.SHARED_CACHE_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(500)).getStringRep()) + .put(SharedBlobCacheService.SHARED_CACHE_REGION_SIZE_SETTING.getKey(), ByteSizeValue.ofBytes(size(100)).getStringRep()) + .put("path.home", createTempDir()) + .build(); + + ThreadPool threadPool = new TestThreadPool("test") { + @Override + public ExecutorService executor(String name) { + ExecutorService generic = super.executor(Names.GENERIC); + if (Objects.equals(name, "bulk")) { + return new StoppableExecutorServiceWrapper(generic); + } + return generic; + } + }; + + try ( + NodeEnvironment environment = new NodeEnvironment(settings, TestEnvironment.newEnvironment(settings)); + var cacheService = new SharedBlobCacheService<>(environment, settings, threadPool, ThreadPool.Names.GENERIC, "bulk") + ) { + + final long size = size(randomIntBetween(1, 100)); + final Thread[] threads = new Thread[10]; + for (int i = 0; i < threads.length; i++) { + threads[i] = new Thread(() -> { + for (int j = 0; j < 1000; j++) { + final var cacheKey = generateCacheKey(); + try { + PlainActionFuture.get( + f -> cacheService.maybeFetchFullEntry( + cacheKey, + size, + (channel, channelPos, relativePos, length, progressUpdater) -> progressUpdater.accept(length), + f + ) + ); + } catch (Exception e) { + throw new AssertionError(e); + } + } + }); + } + for (Thread thread : threads) { + thread.start(); + } + for (Thread thread : threads) { + thread.join(); + } + } finally { + assertTrue(ThreadPool.terminate(threadPool, 10L, TimeUnit.SECONDS)); + } + } + public void testCacheSizeRejectedOnNonFrozenNodes() { String cacheSize = randomBoolean() ? 
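The rewritten test above drops the CountDownLatch in favor of the listener that maybeFetchFullEntry now accepts, blocking on a PlainActionFuture instead of counting bytes down. The general pattern, sketched against a hypothetical async API (startAsyncWork stands in for maybeFetchFullEntry):

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.action.support.PlainActionFuture;

    import java.util.concurrent.TimeUnit;

    // Hand the async API a PlainActionFuture as its ActionListener, then
    // block with a timeout; an async failure surfaces as an exception here.
    class ListenerWaitSketch {
        static void runAndWait(AsyncApi api) throws Exception {
            PlainActionFuture<Void> future = PlainActionFuture.newFuture();
            api.startAsyncWork(future);       // listener fires once all work finishes
            future.get(10, TimeUnit.SECONDS); // fails the test on timeout or error
        }

        interface AsyncApi {
            void startAsyncWork(ActionListener<Void> listener);
        }
    }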
ByteSizeValue.ofBytes(size(500)).getStringRep() diff --git a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBytesTests.java b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBytesTests.java index 24625a91d0975..fa7ec6dbfd5a8 100644 --- a/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBytesTests.java +++ b/x-pack/plugin/blob-cache/src/test/java/org/elasticsearch/blobcache/shared/SharedBytesTests.java @@ -29,7 +29,7 @@ public void testReleasesFileCorrectly() throws Exception { try (var nodeEnv = new NodeEnvironment(nodeSettings, TestEnvironment.newEnvironment(nodeSettings))) { final SharedBytes sharedBytes = new SharedBytes( regions, - randomIntBetween(1, 16) * 4096L, + randomIntBetween(1, 16) * 4096, nodeEnv, ignored -> {}, ignored -> {}, diff --git a/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java b/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java index 513eb1a1b857b..6701a576d6d09 100644 --- a/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java +++ b/x-pack/plugin/ccr/qa/src/main/java/org/elasticsearch/xpack/ccr/ESCCRRestTestCase.java @@ -347,11 +347,7 @@ protected static void createAutoFollowPattern( try (XContentBuilder bodyBuilder = JsonXContent.contentBuilder()) { bodyBuilder.startObject(); { - bodyBuilder.startArray("leader_index_patterns"); - { - bodyBuilder.value(pattern); - } - bodyBuilder.endArray(); + bodyBuilder.array("leader_index_patterns", pattern); if (followIndexPattern != null) { bodyBuilder.field("follow_index_pattern", followIndexPattern); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CCRInfoTransportAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CCRInfoTransportAction.java index 68974c5ed3ecd..84586d6b2fa06 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CCRInfoTransportAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CCRInfoTransportAction.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ccr; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.support.ActionFilters; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.io.stream.StreamInput; @@ -89,7 +90,7 @@ public Usage(StreamInput in) throws IOException { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_0_0; + return TransportVersions.V_7_0_0; } public int getNumberOfFollowerIndices() { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index 752aab933fb81..ec98a7f9123c2 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -8,6 +8,7 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.RequestValidators; @@ -143,7 +144,7 @@ public class Ccr extends Plugin implements ActionPlugin, PersistentTaskPlugin, E public static final String CCR_CUSTOM_METADATA_REMOTE_CLUSTER_NAME_KEY = "remote_cluster_name"; public static final String REQUESTED_OPS_MISSING_METADATA_KEY = 
"es.requested_operations_missing"; - public static final TransportVersion TRANSPORT_VERSION_ACTION_WITH_SHARD_ID = TransportVersion.V_8_500_010; + public static final TransportVersion TRANSPORT_VERSION_ACTION_WITH_SHARD_ID = TransportVersions.V_8_500_020; private final boolean enabled; private final Settings settings; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java index ccc97caceb365..d31a6f681cd9e 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardChangesAction.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.ccr.action; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; @@ -266,7 +266,7 @@ public long getTookInMillis() { super(in); mappingVersion = in.readVLong(); settingsVersion = in.readVLong(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { aliasesVersion = in.readVLong(); } else { aliasesVersion = 0; @@ -302,7 +302,7 @@ public long getTookInMillis() { public void writeTo(final StreamOutput out) throws IOException { out.writeVLong(mappingVersion); out.writeVLong(settingsVersion); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { out.writeVLong(aliasesVersion); } out.writeZLong(globalCheckpoint); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index 2a8d0c6cffa74..1ee15732d1339 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -76,6 +76,7 @@ import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.concurrent.Executor; import java.util.function.BiConsumer; import java.util.function.Consumer; import java.util.function.LongConsumer; @@ -93,6 +94,7 @@ public class ShardFollowTasksExecutor extends PersistentTasksExecutor scheduler = (delay, command) -> threadPool.scheduleUnlessShuttingDown( - delay, - Ccr.CCR_THREAD_POOL_NAME, - command - ); + BiConsumer scheduler = (delay, command) -> threadPool.scheduleUnlessShuttingDown(delay, ccrExecutor, command); final String recordedLeaderShardHistoryUUID = getLeaderShardHistoryUUID(params); return new ShardFollowNodeTask( @@ -528,7 +527,7 @@ protected Scheduler.Cancellable scheduleBackgroundRetentionLeaseRenewal(final Lo remoteClient(params), listener ); - }, retentionLeaseRenewInterval, Ccr.CCR_THREAD_POOL_NAME); + }, retentionLeaseRenewInterval, ccrExecutor); } private void logRetentionLeaseFailure(final String retentionLeaseId, final Throwable cause) { @@ -594,7 +593,7 @@ protected void nodeOperation(final AllocatedPersistentTask task, final ShardFoll e ); try { - threadPool.schedule(() -> nodeOperation(task, params, state), params.getMaxRetryDelay(), Ccr.CCR_THREAD_POOL_NAME); + 
threadPool.schedule(() -> nodeOperation(task, params, state), params.getMaxRetryDelay(), ccrExecutor); } catch (EsRejectedExecutionException rex) { rex.addSuppressed(e); shardFollowNodeTask.onFatalFailure(rex); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsRequest.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsRequest.java index 5916def5420ac..e77c9dd68c4e1 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsRequest.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/bulk/BulkShardOperationsRequest.java @@ -28,7 +28,7 @@ public BulkShardOperationsRequest(StreamInput in) throws IOException { super(in); historyUUID = in.readString(); maxSeqNoOfUpdatesOrDeletes = in.readZLong(); - operations = in.readList(Translog.Operation::readOperation); + operations = in.readCollectionAsList(Translog.Operation::readOperation); } public BulkShardOperationsRequest( diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java index 413129c1e1fc5..fa8f8099900ce 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRepository.java @@ -407,7 +407,7 @@ public void restoreShard( } }, CcrRetentionLeases.RETENTION_LEASE_RENEW_INTERVAL_SETTING.get(store.indexSettings().getNodeSettings()), - Ccr.CCR_THREAD_POOL_NAME + remoteClientResponseExecutor ); toClose.add(() -> { logger.trace("{} canceling background renewal of retention lease [{}] at the end of restore", shardId, retentionLeaseId); @@ -583,6 +583,7 @@ void openSession( response.getStoreFileMetadata(), response.getMappingVersion(), threadPool, + chunkResponseExecutor, ccrSettings, throttledTime::inc, leaderShardId @@ -595,7 +596,7 @@ void openSession( threadPool, responseListener, ccrSettings.getRecoveryActionTimeout(), - ThreadPool.Names.GENERIC, + threadPool.generic(), // TODO should be the remote-client response executor to match the non-timeout case PutCcrRestoreSessionAction.INTERNAL_NAME ) ); @@ -611,6 +612,7 @@ private static class RestoreSession extends FileRestoreContext { private final CcrSettings ccrSettings; private final LongConsumer throttleListener; private final ThreadPool threadPool; + private final Executor timeoutExecutor; private final ShardId leaderShardId; RestoreSession( @@ -623,6 +625,7 @@ private static class RestoreSession extends FileRestoreContext { Store.MetadataSnapshot sourceMetadata, long mappingVersion, ThreadPool threadPool, + Executor timeoutExecutor, CcrSettings ccrSettings, LongConsumer throttleListener, ShardId leaderShardId @@ -634,6 +637,7 @@ private static class RestoreSession extends FileRestoreContext { this.sourceMetadata = sourceMetadata; this.mappingVersion = mappingVersion; this.threadPool = threadPool; + this.timeoutExecutor = timeoutExecutor; this.ccrSettings = ccrSettings; this.throttleListener = throttleListener; this.leaderShardId = leaderShardId; @@ -685,7 +689,7 @@ protected void executeChunkRequest(FileChunk request, ActionListener liste ListenerTimeouts.wrapWithTimeout(threadPool, listener.map(getCcrRestoreFileChunkResponse -> { writeFileChunk(request.md, getCcrRestoreFileChunkResponse); return null; - }), ccrSettings.getRecoveryActionTimeout(), 
ThreadPool.Names.GENERIC, GetCcrRestoreFileChunkAction.INTERNAL_NAME) + }), ccrSettings.getRecoveryActionTimeout(), timeoutExecutor, GetCcrRestoreFileChunkAction.INTERNAL_NAME) ); } @@ -740,7 +744,7 @@ public void close(ActionListener listener) { threadPool, listener, ccrSettings.getRecoveryActionTimeout(), - ThreadPool.Names.GENERIC, + timeoutExecutor, ClearCcrRestoreSessionAction.INTERNAL_NAME ); ClearCcrRestoreSessionRequest clearRequest = new ClearCcrRestoreSessionRequest(sessionUUID, node, leaderShardId); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java index 2442a8fb1c6af..1a822e2dce935 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/repository/CcrRestoreSourceService.java @@ -191,7 +191,7 @@ private void internalCloseSession(String sessionUUID, boolean throwIfSessionMiss private Scheduler.Cancellable scheduleTimeout(String sessionUUID) { TimeValue idleTimeout = ccrSettings.getRecoveryActivityTimeout(); - return threadPool.scheduleWithFixedDelay(() -> maybeTimeout(sessionUUID), idleTimeout, ThreadPool.Names.GENERIC); + return threadPool.scheduleWithFixedDelay(() -> maybeTimeout(sessionUUID), idleTimeout, threadPool.generic()); } private void maybeTimeout(String sessionUUID) { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java index 6ebb165129442..ff3c029cd9974 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowNodeTaskRandomTests.java @@ -115,7 +115,7 @@ private ShardFollowNodeTask createShardFollowTask(int concurrency, TestRun testR BiConsumer scheduler = (delay, task) -> { assert delay.millis() < 100 : "The delay should be kept to a minimum, so that this test does not take to long to run"; if (stopped.get() == false) { - threadPool.schedule(task, delay, ThreadPool.Names.GENERIC); + threadPool.schedule(task, delay, threadPool.generic()); } }; List receivedOperations = Collections.synchronizedList(new ArrayList<>()); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java index 18c2bfb061598..78132749e2923 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskReplicationTests.java @@ -581,7 +581,7 @@ private ShardFollowNodeTask createShardFollowTask(ReplicationGroup leaderGroup, ); final String recordedLeaderIndexHistoryUUID = leaderGroup.getPrimary().getHistoryUUID(); - BiConsumer scheduler = (delay, task) -> threadPool.schedule(task, delay, ThreadPool.Names.GENERIC); + BiConsumer scheduler = (delay, task) -> threadPool.schedule(task, delay, threadPool.generic()); AtomicBoolean stopped = new AtomicBoolean(false); Set fetchOperations = new HashSet<>(); return new ShardFollowNodeTask( @@ -725,7 +725,7 @@ protected Scheduler.Cancellable 
scheduleBackgroundRetentionLeaseRenewal(final Lo return threadPool.scheduleWithFixedDelay( () -> leaderGroup.renewRetentionLease(retentionLeaseId, followerGlobalCheckpoint.getAsLong(), "ccr"), CcrRetentionLeases.RETENTION_LEASE_RENEW_INTERVAL_SETTING.get(followerGroup.getPrimary().indexSettings().getSettings()), - ThreadPool.Names.GENERIC + threadPool.generic() ); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java index d22cf83a4d8db..7d55a492b112a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java @@ -122,7 +122,7 @@ public ClusterStateLicenseService( this.scheduler.register(this); populateExpirationCallbacks(); - threadPool.scheduleWithFixedDelay(xPacklicenseState::cleanupUsageTracking, TimeValue.timeValueHours(1), ThreadPool.Names.GENERIC); + threadPool.scheduleWithFixedDelay(xPacklicenseState::cleanupUsageTracking, TimeValue.timeValueHours(1), threadPool.generic()); } private void logExpirationWarning(long expirationMillis, boolean expired) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetFeatureUsageResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetFeatureUsageResponse.java index 91380980a58a7..858681faa205d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetFeatureUsageResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/GetFeatureUsageResponse.java @@ -7,7 +7,7 @@ package org.elasticsearch.license; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -48,14 +48,14 @@ public FeatureUsageInfo( } public FeatureUsageInfo(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { this.family = in.readOptionalString(); } else { this.family = null; } this.name = in.readString(); this.lastUsedTime = ZonedDateTime.ofInstant(Instant.ofEpochSecond(in.readLong()), ZoneOffset.UTC); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_15_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_15_0)) { this.context = in.readOptionalString(); } else { this.context = null; @@ -65,12 +65,12 @@ public FeatureUsageInfo(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { out.writeOptionalString(this.family); } out.writeString(name); out.writeLong(lastUsedTime.toEpochSecond()); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_15_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_15_0)) { out.writeOptionalString(this.context); } out.writeString(licenseLevel); @@ -104,7 +104,7 @@ public GetFeatureUsageResponse(List features) { } public GetFeatureUsageResponse(StreamInput in) throws IOException { - this.features = in.readList(FeatureUsageInfo::new); + this.features = in.readCollectionAsList(FeatureUsageInfo::new); } public List getFeatures() { @@ 
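The scheduling call sites above all move from naming a thread pool by string to passing its Executor directly: threadPool.generic() replaces ThreadPool.Names.GENERIC in schedule, scheduleUnlessShuttingDown, and scheduleWithFixedDelay. A sketch of the new call shape (the hourly cleanup task is illustrative):

    import org.elasticsearch.core.TimeValue;
    import org.elasticsearch.threadpool.Scheduler;
    import org.elasticsearch.threadpool.ThreadPool;

    // New call shape: pass the Executor itself rather than the string
    // constant ThreadPool.Names.GENERIC.
    class SchedulingSketch {
        static Scheduler.Cancellable scheduleCleanup(ThreadPool threadPool, Runnable task) {
            return threadPool.scheduleWithFixedDelay(task, TimeValue.timeValueHours(1), threadPool.generic());
        }
    }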
-113,7 +113,7 @@ public List getFeatures() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(features); + out.writeCollection(features); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java index a13188b76c5f2..08c30a1670f61 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java @@ -7,6 +7,7 @@ package org.elasticsearch.license; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.NamedDiff; @@ -107,7 +108,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.MINIMUM_COMPATIBLE; + return TransportVersions.MINIMUM_COMPATIBLE; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicResponse.java index db2cbb764a87b..cc22d7394babd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartBasicResponse.java @@ -95,7 +95,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeEnum(status); out.writeOptionalString(acknowledgeMessage); - out.writeMap(acknowledgeMessages, StreamOutput::writeString, StreamOutput::writeStringArray); + out.writeMap(acknowledgeMessages, StreamOutput::writeStringArray); } @Override @@ -110,11 +110,7 @@ protected void addCustomFields(XContentBuilder builder, Params params) throws IO builder.startObject("acknowledge"); builder.field(MESSAGE_FIELD.getPreferredName(), acknowledgeMessage); for (Map.Entry entry : acknowledgeMessages.entrySet()) { - builder.startArray(entry.getKey()); - for (String message : entry.getValue()) { - builder.value(message); - } - builder.endArray(); + builder.array(entry.getKey(), entry.getValue()); } builder.endObject(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialResponse.java index 80281a1da444b..853c3d39e4121 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/PostStartTrialResponse.java @@ -88,7 +88,7 @@ public Status getStatus() { public void writeTo(StreamOutput out) throws IOException { out.writeEnum(status); out.writeOptionalString(acknowledgeMessage); - out.writeMap(acknowledgeMessages, StreamOutput::writeString, StreamOutput::writeStringArray); + out.writeMap(acknowledgeMessages, StreamOutput::writeStringArray); } Map getAcknowledgementMessages() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java index becc73163bf83..6ae1719383afb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java @@ -56,11 +56,7 @@ 
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java
index becc73163bf83..6ae1719383afb 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/RestPostStartTrialLicense.java
@@ -56,11 +56,7 @@ public RestResponse buildResponse(PostStartTrialResponse response, XContentBuild
                     builder.startObject("acknowledge");
                     builder.field("message", response.getAcknowledgementMessage());
                     for (Map.Entry<String, String[]> entry : acknowledgementMessages.entrySet()) {
-                        builder.startArray(entry.getKey());
-                        for (String message : entry.getValue()) {
-                            builder.value(message);
-                        }
-                        builder.endArray();
+                        builder.array(entry.getKey(), entry.getValue());
                     }
                     builder.endObject();
                 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java
index 0cf1091ef28b2..2ac0139ac0309 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoRequest.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.protocol.xpack;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -97,6 +98,6 @@ public void writeTo(StreamOutput out) throws IOException {
     }
 
     private static boolean hasLicenseVersionField(TransportVersion streamVersion) {
-        return streamVersion.between(TransportVersion.V_7_8_1, TransportVersion.V_8_0_0);
+        return streamVersion.between(TransportVersions.V_7_8_1, TransportVersions.V_8_0_0);
     }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java
index 8f872dcb056a1..59fd1db1e72f2 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/XPackInfoResponse.java
@@ -6,7 +6,7 @@
  */
 package org.elasticsearch.protocol.xpack;
 
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -361,7 +361,7 @@ public FeatureSet(String name, boolean available, boolean enabled) {
 
         public FeatureSet(StreamInput in) throws IOException {
             this(in.readString(), readAvailable(in), in.readBoolean());
-            if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+            if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) {
                 in.readMap(); // backcompat reading native code info, but no longer used here
             }
         }
@@ -369,7 +369,7 @@ public FeatureSet(StreamInput in) throws IOException {
         // this is separated out so that the removed description can be read from the stream on construction
        // TODO: remove this for 8.0
         private static boolean readAvailable(StreamInput in) throws IOException {
-            if (in.getTransportVersion().before(TransportVersion.V_7_3_0)) {
+            if (in.getTransportVersion().before(TransportVersions.V_7_3_0)) {
                 in.readOptionalString();
             }
             return in.readBoolean();
@@ -378,12 +378,12 @@ private static boolean readAvailable(StreamInput in) throws IOException {
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeString(name);
-            if (out.getTransportVersion().before(TransportVersion.V_7_3_0)) {
+            if (out.getTransportVersion().before(TransportVersions.V_7_3_0)) {
                 out.writeOptionalString(null);
             }
             out.writeBoolean(available);
             out.writeBoolean(enabled);
-            if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+            if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) {
                 out.writeGenericMap(Collections.emptyMap());
             }
         }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java
index 94604dbd8227c..2603002a2348b 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/GraphExploreRequest.java
@@ -6,7 +6,7 @@
  */
 package org.elasticsearch.protocol.xpack.graph;
 
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.IndicesRequest;
@@ -111,7 +111,7 @@ public GraphExploreRequest(StreamInput in) throws IOException {
         indices = in.readStringArray();
         indicesOptions = IndicesOptions.readIndicesOptions(in);
 
-        if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+        if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) {
             String[] types = in.readStringArray();
             assert types.length == 0;
         }
@@ -180,7 +180,7 @@ public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeStringArray(indices);
         indicesOptions.writeIndicesOptions(out);
-        if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+        if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) {
             out.writeStringArray(Strings.EMPTY_ARRAY);
         }
         out.writeOptionalString(routing);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java
index cc03b58631eeb..05a617002d9cc 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/graph/Hop.java
@@ -71,7 +71,7 @@ void writeTo(StreamOutput out) throws IOException {
         if (vertices == null) {
             out.writeVInt(0);
         } else {
-            out.writeList(vertices);
+            out.writeCollection(vertices);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponse.java
index ff5b846a7f17d..ca3797a7344be 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/protocol/xpack/license/PutLicenseResponse.java
@@ -72,7 +72,7 @@ public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeVInt(status.id());
         out.writeOptionalString(acknowledgeHeader);
-        out.writeMap(acknowledgeMessages, StreamOutput::writeString, StreamOutput::writeStringArray);
+        out.writeMap(acknowledgeMessages, StreamOutput::writeStringArray);
     }
 
     @Override
@@ -82,11 +82,7 @@ protected void addCustomFields(XContentBuilder builder, Params params) throws IO
             builder.startObject("acknowledge");
             builder.field("message", acknowledgeHeader);
             for (Map.Entry<String, String[]> entry : acknowledgeMessages.entrySet()) {
-                builder.startArray(entry.getKey());
-                for (String message : entry.getValue()) {
-                    builder.value(message);
-                }
-                builder.endArray();
+                builder.array(entry.getKey(), entry.getValue());
             }
             builder.endObject();
         }
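Two simplifications repeat across the license responses above: `writeMap` now defaults the key writer to `writeString` when keys are strings, and `XContentBuilder.array` replaces the manual `startArray`/`value`/`endArray` loop. A sketch of both in one place (the class and method names are hypothetical):

```java
import java.io.IOException;
import java.util.Map;

import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.xcontent.XContentBuilder;

// Hypothetical helpers mirroring the pattern in the diffs above.
final class AckMessagesExample {

    // String keys no longer need an explicit StreamOutput::writeString key writer.
    static void write(StreamOutput out, Map<String, String[]> acknowledgeMessages) throws IOException {
        out.writeMap(acknowledgeMessages, StreamOutput::writeStringArray);
    }

    // builder.array(...) emits the whole array field in one call.
    static void toXContent(XContentBuilder builder, Map<String, String[]> acknowledgeMessages) throws IOException {
        builder.startObject("acknowledge");
        for (Map.Entry<String, String[]> entry : acknowledgeMessages.entrySet()) {
            builder.array(entry.getKey(), entry.getValue());
        }
        builder.endObject();
    }
}
```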
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/action/MigrateToDataTiersResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/action/MigrateToDataTiersResponse.java
index a89e0f5614841..b9a5115a9fa1a 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/action/MigrateToDataTiersResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/action/MigrateToDataTiersResponse.java
@@ -7,7 +7,7 @@
 
 package org.elasticsearch.xpack.cluster.action;
 
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -60,13 +60,13 @@ public MigrateToDataTiersResponse(
     public MigrateToDataTiersResponse(StreamInput in) throws IOException {
         super(in);
         removedIndexTemplateName = in.readOptionalString();
-        migratedPolicies = in.readStringList();
-        migratedIndices = in.readStringList();
+        migratedPolicies = in.readStringCollectionAsList();
+        migratedIndices = in.readStringCollectionAsList();
         dryRun = in.readBoolean();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_17_0)) {
-            migratedLegacyTemplates = in.readStringList();
-            migratedComposableTemplates = in.readStringList();
-            migratedComponentTemplates = in.readStringList();
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_17_0)) {
+            migratedLegacyTemplates = in.readStringCollectionAsList();
+            migratedComposableTemplates = in.readStringCollectionAsList();
+            migratedComponentTemplates = in.readStringCollectionAsList();
         } else {
             migratedLegacyTemplates = List.of();
             migratedComposableTemplates = List.of();
@@ -154,7 +154,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeStringCollection(migratedPolicies);
         out.writeStringCollection(migratedIndices);
         out.writeBoolean(dryRun);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_17_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_17_0)) {
             out.writeStringCollection(migratedLegacyTemplates);
             out.writeStringCollection(migratedComposableTemplates);
             out.writeStringCollection(migratedComponentTemplates);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDecider.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDecider.java
index 6e7c4046b3531..9231dfb744a36 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDecider.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDecider.java
@@ -10,6 +10,7 @@
 import org.elasticsearch.cluster.metadata.DesiredNode;
 import org.elasticsearch.cluster.metadata.DesiredNodes;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.metadata.NodesShutdownMetadata;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodeRole;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
@@ -22,6 +23,8 @@
 import org.elasticsearch.common.Strings;
 
 import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Optional;
 import java.util.Set;
@@ -64,7 +67,12 @@ private static Decision shouldFilter(IndexMetadata indexMd, DiscoveryNode node,
     }
 
     public interface PreferredTierFunction {
-        Optional<String> apply(List<String> tierPreference, DiscoveryNodes nodes, DesiredNodes desiredNodes);
+        Optional<String> apply(
+            List<String> tierPreference,
+            DiscoveryNodes nodes,
+            DesiredNodes desiredNodes,
+            NodesShutdownMetadata shutdownMetadata
+        );
     }
 
     private static final Decision YES_PASSES = Decision.single(Decision.YES.type(), NAME, "node passes tier preference filters");
@@ -79,7 +87,12 @@ public static Decision shouldFilter(
         if (tierPreference.isEmpty()) {
             return YES_PASSES;
         }
-        Optional<String> tier = preferredTierFunction.apply(tierPreference, allocation.nodes(), allocation.desiredNodes());
+        Optional<String> tier = preferredTierFunction.apply(
+            tierPreference,
+            allocation.nodes(),
+            allocation.desiredNodes(),
+            allocation.getClusterState().metadata().nodeShutdowns()
+        );
         if (tier.isPresent()) {
             String tierName = tier.get();
             if (allocationAllowed(tierName, node)) {
@@ -136,14 +149,20 @@ private static Decision debugYesAllowed(RoutingAllocation allocation, List<String> tierPreference) {
      */
-    public static Optional<String> preferredAvailableTier(List<String> prioritizedTiers, DiscoveryNodes nodes, DesiredNodes desiredNodes) {
+    public static Optional<String> preferredAvailableTier(
+        List<String> prioritizedTiers,
+        DiscoveryNodes nodes,
+        DesiredNodes desiredNodes,
+        NodesShutdownMetadata shutdownMetadata
+    ) {
+
         final var desiredNodesPreferredTier = getPreferredTierFromDesiredNodes(prioritizedTiers, nodes, desiredNodes);
 
         if (desiredNodesPreferredTier.isPresent()) {
             return desiredNodesPreferredTier;
         }
 
-        return getPreferredAvailableTierFromClusterMembers(prioritizedTiers, nodes);
+        return getPreferredAvailableTierFromClusterMembers(prioritizedTiers, nodes, removingNodeIds(shutdownMetadata));
     }
 
     /**
@@ -199,9 +218,13 @@ private static boolean isDesiredNodeWithinTierJoining(String tier, DiscoveryNode
         return tierNodesPresent(tier, desiredNodes.pending()) && tierNodesPresent(tier, discoveryNodes);
     }
 
-    private static Optional<String> getPreferredAvailableTierFromClusterMembers(List<String> prioritizedTiers, DiscoveryNodes nodes) {
+    private static Optional<String> getPreferredAvailableTierFromClusterMembers(
+        List<String> prioritizedTiers,
+        DiscoveryNodes nodes,
+        Set<String> removingNodeIds
+    ) {
         for (String tier : prioritizedTiers) {
-            if (tierNodesPresent(tier, nodes)) {
+            if (tierNodesPresentConsideringRemovals(tier, nodes, removingNodeIds)) {
                 return Optional.of(tier);
             }
         }
@@ -219,10 +242,40 @@ static boolean tierNodesPresent(String singleTier, Collection<DesiredNode> nodes) {
         return false;
     }
 
+    // This overload for Desired Nodes codepaths, which do not consider Node Shutdown, as Desired Nodes takes precedence
     static boolean tierNodesPresent(String singleTier, DiscoveryNodes nodes) {
+        return tierNodesPresentConsideringRemovals(singleTier, nodes, Collections.emptySet());
+    }
+
+    static boolean tierNodesPresentConsideringRemovals(String singleTier, DiscoveryNodes nodes, Set<String> removingNodeIds) {
         assert singleTier.equals(DiscoveryNodeRole.DATA_ROLE.roleName()) || DataTier.validTierName(singleTier)
             : "tier " + singleTier + " is an invalid tier name";
-        return nodes.isRoleAvailable(DiscoveryNodeRole.DATA_ROLE.roleName()) || nodes.isRoleAvailable(singleTier);
+        var rolesToNodes = nodes.getTiersToNodeIds();
+        Set<String> nodesWithTier = rolesToNodes.getOrDefault(singleTier, Collections.emptySet());
+        Set<String> dataNodes = rolesToNodes.getOrDefault(DiscoveryNodeRole.DATA_ROLE.roleName(), Collections.emptySet());
+
+        if (removingNodeIds.isEmpty()) {
+            return nodesWithTier.isEmpty() == false || dataNodes.isEmpty() == false;
+        } else if (removingNodeIds.size() < nodesWithTier.size() || removingNodeIds.size() < dataNodes.size()) {
+            // There are more nodes in the tier (or more generic data nodes) than there are nodes that are being removed, so
+            // there's at least one node that can hold data for the preferred tier that isn't being removed
+            return true;
+        }
+
+        // A tier might be unavailable because all remaining nodes in the tier are being removed, so now we have to check if there are any
+        // nodes with appropriate roles that aren't being removed.
+        for (String nodeId : dataNodes) {
+            if (removingNodeIds.contains(nodeId) == false) {
+                return true;
+            }
+        }
+        for (String nodeId : nodesWithTier) {
+            if (removingNodeIds.contains(nodeId) == false) {
+                return true;
+            }
+        }
+        // All the nodes with roles appropriate for this tier are being removed, so this tier is not available.
+        return false;
     }
 
     public static boolean allocationAllowed(String tierName, DiscoveryNode node) {
@@ -244,4 +297,18 @@ public static boolean allocationAllowed(String tierName, Set<DiscoveryNodeRole> roles) {
         }
         return false;
     }
+
+    private static Set<String> removingNodeIds(NodesShutdownMetadata shutdownMetadata) {
+        if (shutdownMetadata.getAll().isEmpty()) {
+            return Collections.emptySet();
+        }
+
+        Set<String> removingNodes = new HashSet<>();
+        for (var shutdownEntry : shutdownMetadata.getAll().values()) {
+            if (shutdownEntry.getType().isRemovalType()) {
+                removingNodes.add(shutdownEntry.getNodeId());
+            }
+        }
+        return Collections.unmodifiableSet(removingNodes);
+    }
 }
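The core of the allocation-decider change above: a tier only counts as available if at least one node holding it is not registered for removal in the node-shutdown metadata. A simplified, self-contained sketch of that check, with plain collections standing in for `DiscoveryNodes` and `NodesShutdownMetadata`:

```java
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;

// Simplified sketch of the removal-aware tier availability check.
final class TierAvailabilitySketch {

    static Optional<String> preferredAvailableTier(
        List<String> prioritizedTiers,
        Map<String, Set<String>> tierToNodeIds,  // e.g. "data_hot" -> node ids holding that tier
        Set<String> removingNodeIds              // node ids with a removal-type shutdown marker
    ) {
        for (String tier : prioritizedTiers) {
            Set<String> nodes = tierToNodeIds.getOrDefault(tier, Set.of());
            // Cheap path: more nodes in the tier than nodes shutting down overall.
            if (removingNodeIds.size() < nodes.size()) {
                return Optional.of(tier);
            }
            // Slow path: any single surviving node keeps the tier available.
            for (String nodeId : nodes) {
                if (removingNodeIds.contains(nodeId) == false) {
                    return Optional.of(tier);
                }
            }
        }
        return Optional.empty();
    }
}
```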
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersFeatureSetUsage.java
index 8ec138948f2f1..0bf21f66b4888 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/DataTiersFeatureSetUsage.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -42,7 +43,7 @@ public DataTiersFeatureSetUsage(Map<String, TierSpecificStats> tierStats) {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_7_10_0;
+        return TransportVersions.V_7_10_0;
     }
 
     public Map<String, TierSpecificStats> getTierStats() {
@@ -52,7 +53,7 @@ public Map<String, TierSpecificStats> getTierStats() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        out.writeMap(tierStats, StreamOutput::writeString, (o, v) -> v.writeTo(o));
+        out.writeMap(tierStats, StreamOutput::writeWriteable);
     }
 
     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiFeatureSetUsage.java
index 740ae58beef38..789a927dd6bf4 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/HealthApiFeatureSetUsage.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.core;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.metrics.Counters;
@@ -121,7 +122,7 @@ static void enrichUsageStatsWithValues(Map<String, Object> usageStats) {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_8_7_0;
+        return TransportVersions.V_8_7_0;
     }
 
     public Map<String, Object> stats() {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/RemoteClusterFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/RemoteClusterFeatureSetUsage.java
index a251bbfd65303..3a75ce34e22bf 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/RemoteClusterFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/RemoteClusterFeatureSetUsage.java
@@ -23,7 +23,7 @@ public class RemoteClusterFeatureSetUsage extends XPackFeatureSet.Usage {
 
     public RemoteClusterFeatureSetUsage(StreamInput in) throws IOException {
         super(in);
-        this.remoteConnectionInfos = in.readImmutableList(RemoteConnectionInfo::new);
+        this.remoteConnectionInfos = in.readCollectionAsImmutableList(RemoteConnectionInfo::new);
     }
 
     public RemoteClusterFeatureSetUsage(List<RemoteConnectionInfo> remoteConnectionInfos) {
@@ -39,7 +39,7 @@ public TransportVersion getMinimalSupportedVersion() {
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        out.writeList(remoteConnectionInfos);
+        out.writeCollection(remoteConnectionInfos);
     }
 
     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java
index 7e2c9dd494086..5610b18ac627c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java
@@ -6,21 +6,12 @@
  */
 package org.elasticsearch.xpack.core;
 
-import org.elasticsearch.action.ActionResponse;
-import org.elasticsearch.action.ActionType;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.NamedDiff;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.settings.Setting;
-import org.elasticsearch.license.DeleteLicenseAction;
-import org.elasticsearch.license.GetBasicStatusAction;
-import org.elasticsearch.license.GetLicenseAction;
-import org.elasticsearch.license.GetTrialStatusAction;
 import org.elasticsearch.license.LicensesMetadata;
-import org.elasticsearch.license.PostStartBasicAction;
-import org.elasticsearch.license.PostStartTrialAction;
-import org.elasticsearch.license.PutLicenseAction;
 import org.elasticsearch.persistent.PersistentTaskParams;
 import org.elasticsearch.persistent.PersistentTaskState;
 import org.elasticsearch.plugins.ActionPlugin;
@@ -29,27 +20,20 @@
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.xcontent.NamedXContentRegistry;
 import org.elasticsearch.xcontent.ParseField;
-import org.elasticsearch.xpack.cluster.action.MigrateToDataTiersAction;
-import org.elasticsearch.xpack.core.action.XPackInfoAction;
-import org.elasticsearch.xpack.core.action.XPackUsageAction;
 import org.elasticsearch.xpack.core.aggregatemetric.AggregateMetricFeatureSetUsage;
 import org.elasticsearch.xpack.core.analytics.AnalyticsFeatureSetUsage;
 import org.elasticsearch.xpack.core.application.EnterpriseSearchFeatureSetUsage;
 import org.elasticsearch.xpack.core.archive.ArchiveFeatureSetUsage;
-import org.elasticsearch.xpack.core.async.DeleteAsyncResultAction;
 import org.elasticsearch.xpack.core.ccr.AutoFollowMetadata;
 import org.elasticsearch.xpack.core.datastreams.DataStreamFeatureSetUsage;
 import org.elasticsearch.xpack.core.datastreams.DataStreamLifecycleFeatureSetUsage;
-import org.elasticsearch.xpack.core.downsample.DownsampleIndexerAction;
 import org.elasticsearch.xpack.core.downsample.DownsampleShardStatus;
 import org.elasticsearch.xpack.core.enrich.EnrichFeatureSetUsage;
 import org.elasticsearch.xpack.core.enrich.action.ExecuteEnrichPolicyStatus;
 import org.elasticsearch.xpack.core.eql.EqlFeatureSetUsage;
 import org.elasticsearch.xpack.core.esql.EsqlFeatureSetUsage;
 import org.elasticsearch.xpack.core.frozen.FrozenIndicesFeatureSetUsage;
-import org.elasticsearch.xpack.core.frozen.action.FreezeIndexAction;
 import org.elasticsearch.xpack.core.graph.GraphFeatureSetUsage;
-import org.elasticsearch.xpack.core.graph.action.GraphExploreAction;
 import org.elasticsearch.xpack.core.ilm.AllocateAction;
 import org.elasticsearch.xpack.core.ilm.DeleteAction;
 import org.elasticsearch.xpack.core.ilm.DownsampleAction;
@@ -69,79 +53,13 @@
 import org.elasticsearch.xpack.core.ilm.TimeseriesLifecycleType;
 import org.elasticsearch.xpack.core.ilm.UnfollowAction;
 import org.elasticsearch.xpack.core.ilm.WaitForSnapshotAction;
-import org.elasticsearch.xpack.core.ilm.action.DeleteLifecycleAction;
-import org.elasticsearch.xpack.core.ilm.action.ExplainLifecycleAction;
-import org.elasticsearch.xpack.core.ilm.action.GetLifecycleAction;
-import org.elasticsearch.xpack.core.ilm.action.MoveToStepAction;
-import org.elasticsearch.xpack.core.ilm.action.PutLifecycleAction;
-import org.elasticsearch.xpack.core.ilm.action.RemoveIndexLifecyclePolicyAction;
-import org.elasticsearch.xpack.core.ilm.action.RetryAction;
 import org.elasticsearch.xpack.core.logstash.LogstashFeatureSetUsage;
 import org.elasticsearch.xpack.core.ml.MachineLearningFeatureSetUsage;
 import org.elasticsearch.xpack.core.ml.MlMetadata;
 import org.elasticsearch.xpack.core.ml.MlTasks;
-import org.elasticsearch.xpack.core.ml.action.CloseJobAction;
-import org.elasticsearch.xpack.core.ml.action.DeleteCalendarAction;
-import org.elasticsearch.xpack.core.ml.action.DeleteCalendarEventAction;
-import org.elasticsearch.xpack.core.ml.action.DeleteDataFrameAnalyticsAction;
-import org.elasticsearch.xpack.core.ml.action.DeleteDatafeedAction;
-import org.elasticsearch.xpack.core.ml.action.DeleteExpiredDataAction;
-import org.elasticsearch.xpack.core.ml.action.DeleteFilterAction;
-import org.elasticsearch.xpack.core.ml.action.DeleteForecastAction;
-import org.elasticsearch.xpack.core.ml.action.DeleteJobAction;
-import org.elasticsearch.xpack.core.ml.action.DeleteModelSnapshotAction;
-import org.elasticsearch.xpack.core.ml.action.DeleteTrainedModelAction;
-import org.elasticsearch.xpack.core.ml.action.EvaluateDataFrameAction;
-import org.elasticsearch.xpack.core.ml.action.ExplainDataFrameAnalyticsAction;
-import org.elasticsearch.xpack.core.ml.action.FinalizeJobExecutionAction;
-import org.elasticsearch.xpack.core.ml.action.FlushJobAction;
-import org.elasticsearch.xpack.core.ml.action.ForecastJobAction;
-import org.elasticsearch.xpack.core.ml.action.GetBucketsAction;
-import org.elasticsearch.xpack.core.ml.action.GetCalendarEventsAction;
-import org.elasticsearch.xpack.core.ml.action.GetCalendarsAction;
-import org.elasticsearch.xpack.core.ml.action.GetCategoriesAction;
-import org.elasticsearch.xpack.core.ml.action.GetDataFrameAnalyticsAction;
-import org.elasticsearch.xpack.core.ml.action.GetDataFrameAnalyticsStatsAction;
-import org.elasticsearch.xpack.core.ml.action.GetDatafeedsAction;
-import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction;
-import org.elasticsearch.xpack.core.ml.action.GetFiltersAction;
-import org.elasticsearch.xpack.core.ml.action.GetInfluencersAction;
-import org.elasticsearch.xpack.core.ml.action.GetJobsAction;
-import org.elasticsearch.xpack.core.ml.action.GetJobsStatsAction;
-import org.elasticsearch.xpack.core.ml.action.GetModelSnapshotsAction;
-import org.elasticsearch.xpack.core.ml.action.GetOverallBucketsAction;
-import org.elasticsearch.xpack.core.ml.action.GetRecordsAction;
-import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsAction;
-import org.elasticsearch.xpack.core.ml.action.GetTrainedModelsStatsAction;
-import org.elasticsearch.xpack.core.ml.action.InferModelAction;
-import org.elasticsearch.xpack.core.ml.action.IsolateDatafeedAction;
-import org.elasticsearch.xpack.core.ml.action.KillProcessAction;
-import org.elasticsearch.xpack.core.ml.action.MlInfoAction;
 import org.elasticsearch.xpack.core.ml.action.OpenJobAction;
-import org.elasticsearch.xpack.core.ml.action.PersistJobAction;
-import org.elasticsearch.xpack.core.ml.action.PostCalendarEventsAction;
-import org.elasticsearch.xpack.core.ml.action.PostDataAction;
-import org.elasticsearch.xpack.core.ml.action.PreviewDatafeedAction;
-import org.elasticsearch.xpack.core.ml.action.PutCalendarAction;
-import org.elasticsearch.xpack.core.ml.action.PutDataFrameAnalyticsAction;
-import org.elasticsearch.xpack.core.ml.action.PutDatafeedAction;
-import org.elasticsearch.xpack.core.ml.action.PutFilterAction;
-import org.elasticsearch.xpack.core.ml.action.PutJobAction;
-import org.elasticsearch.xpack.core.ml.action.PutTrainedModelAction;
-import org.elasticsearch.xpack.core.ml.action.RevertModelSnapshotAction;
-import org.elasticsearch.xpack.core.ml.action.SetUpgradeModeAction;
 import org.elasticsearch.xpack.core.ml.action.StartDataFrameAnalyticsAction;
 import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction;
-import org.elasticsearch.xpack.core.ml.action.StopDatafeedAction;
-import org.elasticsearch.xpack.core.ml.action.UpdateCalendarJobAction;
-import org.elasticsearch.xpack.core.ml.action.UpdateDataFrameAnalyticsAction;
-import org.elasticsearch.xpack.core.ml.action.UpdateDatafeedAction;
-import org.elasticsearch.xpack.core.ml.action.UpdateFilterAction;
-import org.elasticsearch.xpack.core.ml.action.UpdateJobAction;
-import org.elasticsearch.xpack.core.ml.action.UpdateModelSnapshotAction;
-import org.elasticsearch.xpack.core.ml.action.UpdateProcessAction;
-import org.elasticsearch.xpack.core.ml.action.ValidateDetectorAction;
-import org.elasticsearch.xpack.core.ml.action.ValidateJobConfigAction;
 import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState;
 import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsTaskState;
 import org.elasticsearch.xpack.core.ml.job.config.JobTaskState;
@@ -150,40 +68,10 @@
 import org.elasticsearch.xpack.core.monitoring.MonitoringFeatureSetUsage;
 import org.elasticsearch.xpack.core.rollup.RollupFeatureSetUsage;
 import org.elasticsearch.xpack.core.rollup.RollupField;
-import org.elasticsearch.xpack.core.rollup.action.DeleteRollupJobAction;
-import org.elasticsearch.xpack.core.rollup.action.GetRollupCapsAction;
-import org.elasticsearch.xpack.core.rollup.action.GetRollupJobsAction;
-import org.elasticsearch.xpack.core.rollup.action.PutRollupJobAction;
-import org.elasticsearch.xpack.core.rollup.action.RollupSearchAction;
-import org.elasticsearch.xpack.core.rollup.action.StartRollupJobAction;
-import org.elasticsearch.xpack.core.rollup.action.StopRollupJobAction;
 import org.elasticsearch.xpack.core.rollup.job.RollupJob;
 import org.elasticsearch.xpack.core.rollup.job.RollupJobStatus;
-import org.elasticsearch.xpack.core.search.action.GetAsyncSearchAction;
-import org.elasticsearch.xpack.core.search.action.SubmitAsyncSearchAction;
 import org.elasticsearch.xpack.core.searchablesnapshots.SearchableSnapshotFeatureSetUsage;
 import org.elasticsearch.xpack.core.security.SecurityFeatureSetUsage;
-import org.elasticsearch.xpack.core.security.action.apikey.CreateApiKeyAction;
-import org.elasticsearch.xpack.core.security.action.apikey.GetApiKeyAction;
-import org.elasticsearch.xpack.core.security.action.apikey.InvalidateApiKeyAction;
-import org.elasticsearch.xpack.core.security.action.realm.ClearRealmCacheAction;
-import org.elasticsearch.xpack.core.security.action.role.ClearRolesCacheAction;
-import org.elasticsearch.xpack.core.security.action.role.DeleteRoleAction;
-import org.elasticsearch.xpack.core.security.action.role.GetRolesAction;
-import org.elasticsearch.xpack.core.security.action.role.PutRoleAction;
-import org.elasticsearch.xpack.core.security.action.rolemapping.DeleteRoleMappingAction;
-import org.elasticsearch.xpack.core.security.action.rolemapping.GetRoleMappingsAction;
-import org.elasticsearch.xpack.core.security.action.rolemapping.PutRoleMappingAction;
-import org.elasticsearch.xpack.core.security.action.token.CreateTokenAction;
-import org.elasticsearch.xpack.core.security.action.token.InvalidateTokenAction;
-import org.elasticsearch.xpack.core.security.action.token.RefreshTokenAction;
-import org.elasticsearch.xpack.core.security.action.user.AuthenticateAction;
-import org.elasticsearch.xpack.core.security.action.user.ChangePasswordAction;
-import org.elasticsearch.xpack.core.security.action.user.DeleteUserAction;
-import org.elasticsearch.xpack.core.security.action.user.GetUsersAction;
-import org.elasticsearch.xpack.core.security.action.user.HasPrivilegesAction;
-import org.elasticsearch.xpack.core.security.action.user.PutUserAction;
-import org.elasticsearch.xpack.core.security.action.user.SetEnabledAction;
 import org.elasticsearch.xpack.core.security.authc.TokenMetadata;
 import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.AllExpression;
 import org.elasticsearch.xpack.core.security.authc.support.mapper.expressiondsl.AnyExpression;
@@ -194,26 +82,11 @@
 import org.elasticsearch.xpack.core.security.authz.privilege.ConfigurableClusterPrivileges;
 import org.elasticsearch.xpack.core.slm.SLMFeatureSetUsage;
 import org.elasticsearch.xpack.core.slm.SnapshotLifecycleMetadata;
-import org.elasticsearch.xpack.core.slm.action.DeleteSnapshotLifecycleAction;
-import org.elasticsearch.xpack.core.slm.action.ExecuteSnapshotLifecycleAction;
-import org.elasticsearch.xpack.core.slm.action.GetSnapshotLifecycleAction;
-import org.elasticsearch.xpack.core.slm.action.GetSnapshotLifecycleStatsAction;
-import org.elasticsearch.xpack.core.slm.action.PutSnapshotLifecycleAction;
 import org.elasticsearch.xpack.core.spatial.SpatialFeatureSetUsage;
 import org.elasticsearch.xpack.core.sql.SqlFeatureSetUsage;
-import org.elasticsearch.xpack.core.ssl.action.GetCertificateInfoAction;
-import org.elasticsearch.xpack.core.termsenum.action.TermsEnumAction;
-import org.elasticsearch.xpack.core.textstructure.action.FindStructureAction;
 import org.elasticsearch.xpack.core.transform.TransformFeatureSetUsage;
 import org.elasticsearch.xpack.core.transform.TransformField;
 import org.elasticsearch.xpack.core.transform.TransformMetadata;
-import org.elasticsearch.xpack.core.transform.action.DeleteTransformAction;
-import org.elasticsearch.xpack.core.transform.action.GetTransformAction;
-import org.elasticsearch.xpack.core.transform.action.GetTransformStatsAction;
-import org.elasticsearch.xpack.core.transform.action.PreviewTransformAction;
-import org.elasticsearch.xpack.core.transform.action.PutTransformAction;
-import org.elasticsearch.xpack.core.transform.action.StartTransformAction;
-import org.elasticsearch.xpack.core.transform.action.StopTransformAction;
 import org.elasticsearch.xpack.core.transform.transforms.NullRetentionPolicyConfig;
 import org.elasticsearch.xpack.core.transform.transforms.RetentionPolicyConfig;
 import org.elasticsearch.xpack.core.transform.transforms.SyncConfig;
@@ -224,14 +97,6 @@
 import org.elasticsearch.xpack.core.votingonly.VotingOnlyNodeFeatureSetUsage;
 import org.elasticsearch.xpack.core.watcher.WatcherFeatureSetUsage;
 import org.elasticsearch.xpack.core.watcher.WatcherMetadata;
-import org.elasticsearch.xpack.core.watcher.transport.actions.ack.AckWatchAction;
-import org.elasticsearch.xpack.core.watcher.transport.actions.activate.ActivateWatchAction;
-import org.elasticsearch.xpack.core.watcher.transport.actions.delete.DeleteWatchAction;
-import org.elasticsearch.xpack.core.watcher.transport.actions.execute.ExecuteWatchAction;
-import org.elasticsearch.xpack.core.watcher.transport.actions.get.GetWatchAction;
-import org.elasticsearch.xpack.core.watcher.transport.actions.put.PutWatchAction;
-import org.elasticsearch.xpack.core.watcher.transport.actions.service.WatcherServiceAction;
-import org.elasticsearch.xpack.core.watcher.transport.actions.stats.WatcherStatsAction;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -257,165 +122,6 @@ public List<Setting<?>> getSettings() {
         return settings;
     }
 
-    @Override
-    public List<ActionType<? extends ActionResponse>> getClientActions() {
-        return Arrays.asList(
-            // graph
-            GraphExploreAction.INSTANCE,
-            // ML
-            GetJobsAction.INSTANCE,
-            GetJobsStatsAction.INSTANCE,
-            MlInfoAction.INSTANCE,
-            PutJobAction.INSTANCE,
-            UpdateJobAction.INSTANCE,
-            DeleteJobAction.INSTANCE,
-            OpenJobAction.INSTANCE,
-            GetFiltersAction.INSTANCE,
-            PutFilterAction.INSTANCE,
-            UpdateFilterAction.INSTANCE,
-            DeleteFilterAction.INSTANCE,
-            KillProcessAction.INSTANCE,
-            GetBucketsAction.INSTANCE,
-            GetInfluencersAction.INSTANCE,
-            GetOverallBucketsAction.INSTANCE,
-            GetRecordsAction.INSTANCE,
-            PostDataAction.INSTANCE,
-            CloseJobAction.INSTANCE,
-            FinalizeJobExecutionAction.INSTANCE,
-            FlushJobAction.INSTANCE,
-            ValidateDetectorAction.INSTANCE,
-            ValidateJobConfigAction.INSTANCE,
-            GetCategoriesAction.INSTANCE,
-            GetModelSnapshotsAction.INSTANCE,
-            RevertModelSnapshotAction.INSTANCE,
-            UpdateModelSnapshotAction.INSTANCE,
-            GetDatafeedsAction.INSTANCE,
-            GetDatafeedsStatsAction.INSTANCE,
-            PutDatafeedAction.INSTANCE,
-            UpdateDatafeedAction.INSTANCE,
-            DeleteDatafeedAction.INSTANCE,
-            PreviewDatafeedAction.INSTANCE,
-            StartDatafeedAction.INSTANCE,
-            StopDatafeedAction.INSTANCE,
-            IsolateDatafeedAction.INSTANCE,
-            DeleteModelSnapshotAction.INSTANCE,
-            UpdateProcessAction.INSTANCE,
-            DeleteExpiredDataAction.INSTANCE,
-            ForecastJobAction.INSTANCE,
-            DeleteForecastAction.INSTANCE,
-            GetCalendarsAction.INSTANCE,
-            PutCalendarAction.INSTANCE,
-            DeleteCalendarAction.INSTANCE,
-            DeleteCalendarEventAction.INSTANCE,
-            UpdateCalendarJobAction.INSTANCE,
-            GetCalendarEventsAction.INSTANCE,
-            PostCalendarEventsAction.INSTANCE,
-            PersistJobAction.INSTANCE,
-            SetUpgradeModeAction.INSTANCE,
-            PutDataFrameAnalyticsAction.INSTANCE,
-            GetDataFrameAnalyticsAction.INSTANCE,
-            GetDataFrameAnalyticsStatsAction.INSTANCE,
-            UpdateDataFrameAnalyticsAction.INSTANCE,
-            DeleteDataFrameAnalyticsAction.INSTANCE,
-            StartDataFrameAnalyticsAction.INSTANCE,
-            EvaluateDataFrameAction.INSTANCE,
-            ExplainDataFrameAnalyticsAction.INSTANCE,
-            InferModelAction.INSTANCE,
-            InferModelAction.EXTERNAL_INSTANCE,
-            GetTrainedModelsAction.INSTANCE,
-            DeleteTrainedModelAction.INSTANCE,
-            GetTrainedModelsStatsAction.INSTANCE,
-            PutTrainedModelAction.INSTANCE,
-            // security
-            ClearRealmCacheAction.INSTANCE,
-            ClearRolesCacheAction.INSTANCE,
-            GetUsersAction.INSTANCE,
-            PutUserAction.INSTANCE,
-            DeleteUserAction.INSTANCE,
-            GetRolesAction.INSTANCE,
-            PutRoleAction.INSTANCE,
-            DeleteRoleAction.INSTANCE,
-            ChangePasswordAction.INSTANCE,
-            AuthenticateAction.INSTANCE,
-            SetEnabledAction.INSTANCE,
-            HasPrivilegesAction.INSTANCE,
-            GetRoleMappingsAction.INSTANCE,
-            PutRoleMappingAction.INSTANCE,
-            DeleteRoleMappingAction.INSTANCE,
-            CreateTokenAction.INSTANCE,
-            InvalidateTokenAction.INSTANCE,
-            GetCertificateInfoAction.INSTANCE,
-            RefreshTokenAction.INSTANCE,
-            CreateApiKeyAction.INSTANCE,
-            InvalidateApiKeyAction.INSTANCE,
-            GetApiKeyAction.INSTANCE,
-            // watcher
-            PutWatchAction.INSTANCE,
-            DeleteWatchAction.INSTANCE,
-            GetWatchAction.INSTANCE,
-            WatcherStatsAction.INSTANCE,
-            AckWatchAction.INSTANCE,
-            ActivateWatchAction.INSTANCE,
-            WatcherServiceAction.INSTANCE,
-            ExecuteWatchAction.INSTANCE,
-            // license
-            PutLicenseAction.INSTANCE,
-            GetLicenseAction.INSTANCE,
-            DeleteLicenseAction.INSTANCE,
-            PostStartTrialAction.INSTANCE,
-            GetTrialStatusAction.INSTANCE,
-            PostStartBasicAction.INSTANCE,
-            GetBasicStatusAction.INSTANCE,
-            // x-pack
-            XPackInfoAction.INSTANCE,
-            XPackUsageAction.INSTANCE,
-            // rollup
-            RollupSearchAction.INSTANCE,
-            PutRollupJobAction.INSTANCE,
-            StartRollupJobAction.INSTANCE,
-            StopRollupJobAction.INSTANCE,
-            DeleteRollupJobAction.INSTANCE,
-            GetRollupJobsAction.INSTANCE,
-            GetRollupCapsAction.INSTANCE,
-            // ILM
-            DeleteLifecycleAction.INSTANCE,
-            GetLifecycleAction.INSTANCE,
-            PutLifecycleAction.INSTANCE,
-            ExplainLifecycleAction.INSTANCE,
-            RemoveIndexLifecyclePolicyAction.INSTANCE,
-            MoveToStepAction.INSTANCE,
-            RetryAction.INSTANCE,
-            PutSnapshotLifecycleAction.INSTANCE,
-            GetSnapshotLifecycleAction.INSTANCE,
-            DeleteSnapshotLifecycleAction.INSTANCE,
-            ExecuteSnapshotLifecycleAction.INSTANCE,
-            GetSnapshotLifecycleStatsAction.INSTANCE,
-            MigrateToDataTiersAction.INSTANCE,
-
-            // Freeze
-            FreezeIndexAction.INSTANCE,
-            // Data Frame
-            PutTransformAction.INSTANCE,
-            StartTransformAction.INSTANCE,
-            StopTransformAction.INSTANCE,
-            DeleteTransformAction.INSTANCE,
-            GetTransformAction.INSTANCE,
-            GetTransformStatsAction.INSTANCE,
-            PreviewTransformAction.INSTANCE,
-            // Async Search
-            SubmitAsyncSearchAction.INSTANCE,
-            GetAsyncSearchAction.INSTANCE,
-            DeleteAsyncResultAction.INSTANCE,
-            // Text Structure
-            FindStructureAction.INSTANCE,
-            // Terms enum API
-            TermsEnumAction.INSTANCE,
-            // TSDB Downsampling
-            DownsampleIndexerAction.INSTANCE,
-            org.elasticsearch.action.downsample.DownsampleAction.INSTANCE
-        );
-    }
-
     @Override
     public List<NamedWriteableRegistry.Entry> getNamedWriteables() {
         return Stream.of(
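With `getClientActions()` removed, callers reference each `ActionType` constant directly where the action is declared, so the plugin no longer enumerates them. A hypothetical action definition in that style (name and response type invented for illustration, not part of this PR):

```java
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.support.master.AcknowledgedResponse;

// Hypothetical action: the INSTANCE constant is the single point of reference
// for clients and transport handlers alike; no plugin-level registration needed.
public class ExampleAction extends ActionType<AcknowledgedResponse> {
    public static final ExampleAction INSTANCE = new ExampleAction();
    public static final String NAME = "cluster:admin/example";

    private ExampleAction() {
        super(NAME, AcknowledgedResponse::new);
    }
}
```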
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageResponse.java
index 169cc4f4009e8..e6ed59539d161 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/XPackUsageResponse.java
@@ -24,7 +24,7 @@ public XPackUsageResponse(final List<XPackFeatureSet.Usage> usages) {
     }
 
     public XPackUsageResponse(final StreamInput in) throws IOException {
-        usages = in.readNamedWriteableList(XPackFeatureSet.Usage.class);
+        usages = in.readNamedWriteableCollectionAsList(XPackFeatureSet.Usage.class);
     }
 
     public List<XPackFeatureSet.Usage> getUsages() {
@@ -41,7 +41,7 @@ public void writeTo(final StreamOutput out) throws IOException {
     }
 
     private static void writeTo(final StreamOutput out, final List<XPackFeatureSet.Usage> usages) throws IOException {
-        out.writeNamedWriteableList(usages);
+        out.writeNamedWriteableCollection(usages);
     }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/util/QueryPage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/util/QueryPage.java
index b1303246337b0..4c8ccc17f940c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/util/QueryPage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/action/util/QueryPage.java
@@ -43,7 +43,7 @@ public QueryPage(List<T> results, long count, ParseField resultsField) {
 
     public QueryPage(StreamInput in, Reader<T> hitReader) throws IOException {
         resultsField = new ParseField(in.readString());
-        results = in.readList(hitReader);
+        results = in.readCollectionAsList(hitReader);
         count = in.readLong();
     }
 
@@ -54,7 +54,7 @@ public static ResourceNotFoundException emptyQueryPage(ParseField resultsField)
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(resultsField.getPreferredName());
-        out.writeList(results);
+        out.writeCollection(results);
         out.writeLong(count);
     }
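`readList`/`writeList` and relatives are renamed to spell out the concrete shape (`readCollectionAsList`, `writeCollection`, `readStringCollectionAsList`, `readNamedWriteableCollectionAsList`, ...); the wire format itself is untouched. A small round-trip sketch, assuming the Elasticsearch core classes are on the classpath:

```java
import java.io.IOException;
import java.util.List;

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

// Minimal sketch: the renamed methods read and write the same vint-size-plus-
// elements encoding as the old writeList/readList pair.
final class CollectionWireExample {
    static List<String> roundTrip(List<String> values) throws IOException {
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.writeStringCollection(values);
            try (StreamInput in = out.bytes().streamInput()) {
                return in.readStringCollectionAsList();
            }
        }
    }
}
```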
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/aggregatemetric/AggregateMetricFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/aggregatemetric/AggregateMetricFeatureSetUsage.java
index 441e62326640b..56a2fad47cf2f 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/aggregatemetric/AggregateMetricFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/aggregatemetric/AggregateMetricFeatureSetUsage.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.aggregatemetric;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.xpack.core.XPackFeatureSet;
 import org.elasticsearch.xpack.core.XPackField;
@@ -27,7 +28,7 @@ public AggregateMetricFeatureSetUsage(boolean available, boolean enabled) {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_7_11_0;
+        return TransportVersions.V_7_11_0;
     }
 
     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/AnalyticsFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/AnalyticsFeatureSetUsage.java
index 4c5b198a7565b..89bd749f2ea1d 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/AnalyticsFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/AnalyticsFeatureSetUsage.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.analytics;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xcontent.XContentBuilder;
@@ -47,7 +48,7 @@ protected void innerXContent(XContentBuilder builder, Params params) throws IOEx
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_7_4_0;
+        return TransportVersions.V_7_4_0;
     }
 
     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/action/AnalyticsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/action/AnalyticsStatsAction.java
index dd0ed88d5f13e..ad4c54b0d0465 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/action/AnalyticsStatsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/analytics/action/AnalyticsStatsAction.java
@@ -6,7 +6,7 @@
  */
 package org.elasticsearch.xpack.core.analytics.action;
 
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.FailedNodeException;
 import org.elasticsearch.action.support.nodes.BaseNodeResponse;
@@ -107,12 +107,12 @@ public Response(ClusterName clusterName, List<NodeResponse> nodes, List<FailedNodeException> failures) {
 
         @Override
         protected List<NodeResponse> readNodesFrom(StreamInput in) throws IOException {
-            return in.readList(NodeResponse::new);
+            return in.readCollectionAsList(NodeResponse::new);
         }
 
         @Override
         protected void writeNodesTo(StreamOutput out, List<NodeResponse> nodes) throws IOException {
-            out.writeList(nodes);
+            out.writeCollection(nodes);
         }
 
         public EnumCounters<Item> getStats() {
@@ -144,15 +144,15 @@ public NodeResponse(DiscoveryNode node, EnumCounters<Item> counters) {
 
         public NodeResponse(StreamInput in) throws IOException {
             super(in);
-            if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_8_0)) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_8_0)) {
                 counters = new EnumCounters<>(in, Item.class);
             } else {
                 counters = new EnumCounters<>(Item.class);
-                if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) {
+                if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_7_0)) {
                     counters.inc(Item.BOXPLOT, in.readVLong());
                 }
                 counters.inc(Item.CUMULATIVE_CARDINALITY, in.readZLong());
-                if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) {
+                if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_7_0)) {
                     counters.inc(Item.STRING_STATS, in.readVLong());
                     counters.inc(Item.TOP_METRICS, in.readVLong());
                 }
@@ -162,14 +162,14 @@ public NodeResponse(StreamInput in) throws IOException {
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
-            if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_8_0)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_8_0)) {
                 counters.writeTo(out);
             } else {
-                if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) {
+                if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_7_0)) {
                     out.writeVLong(counters.get(Item.BOXPLOT));
                 }
                 out.writeZLong(counters.get(Item.CUMULATIVE_CARDINALITY));
-                if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) {
+                if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_7_0)) {
                     out.writeVLong(counters.get(Item.STRING_STATS));
                     out.writeVLong(counters.get(Item.TOP_METRICS));
                 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java
index 0d49c9e2a8c84..505d85c764b17 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/application/EnterpriseSearchFeatureSetUsage.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.application;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xcontent.XContentBuilder;
@@ -21,8 +22,8 @@
 
 public class EnterpriseSearchFeatureSetUsage extends XPackFeatureSet.Usage {
 
-    static final TransportVersion BEHAVIORAL_ANALYTICS_TRANSPORT_VERSION = TransportVersion.V_8_8_1;
-    static final TransportVersion QUERY_RULES_TRANSPORT_VERSION = TransportVersion.V_8_500_046;
+    static final TransportVersion BEHAVIORAL_ANALYTICS_TRANSPORT_VERSION = TransportVersions.V_8_8_1;
+    static final TransportVersion QUERY_RULES_TRANSPORT_VERSION = TransportVersions.V_8_500_046;
 
     public static final String SEARCH_APPLICATIONS = "search_applications";
     public static final String ANALYTICS_COLLECTIONS = "analytics_collections";
@@ -80,7 +81,7 @@ public void writeTo(StreamOutput out) throws IOException {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_8_8_0;
+        return TransportVersions.V_8_8_0;
     }
 
     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/archive/ArchiveFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/archive/ArchiveFeatureSetUsage.java
index 79a386ce2147a..4201a30034786 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/archive/ArchiveFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/archive/ArchiveFeatureSetUsage.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.archive;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xcontent.ToXContent;
@@ -29,7 +30,7 @@ public ArchiveFeatureSetUsage(StreamInput input) throws IOException {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_8_3_0;
+        return TransportVersions.V_8_3_0;
     }
 
     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncTaskIndexService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncTaskIndexService.java
index be6c1a29bcc6f..e1fa08f3c9bea 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncTaskIndexService.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncTaskIndexService.java
@@ -12,6 +12,7 @@
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.ResourceNotFoundException;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.delete.DeleteRequest;
@@ -569,7 +570,7 @@ private void writeResponse(R response, OutputStream os) throws IOException {
         os = Streams.noCloseStream(os);
         TransportVersion minNodeVersion = clusterService.state().getMinTransportVersion();
         TransportVersion.writeVersion(minNodeVersion, new OutputStreamStreamOutput(os));
-        if (minNodeVersion.onOrAfter(TransportVersion.V_7_15_0)) {
+        if (minNodeVersion.onOrAfter(TransportVersions.V_7_15_0)) {
             os = CompressorFactory.COMPRESSOR.threadLocalOutputStream(os);
         }
         try (OutputStreamStreamOutput out = new OutputStreamStreamOutput(os)) {
@@ -594,7 +595,7 @@ public int read() {
         });
         TransportVersion version = TransportVersion.readVersion(new InputStreamStreamInput(encodedIn));
         assert version.onOrBefore(TransportVersion.current()) : version + " >= " + TransportVersion.current();
-        if (version.onOrAfter(TransportVersion.V_7_15_0)) {
+        if (version.onOrAfter(TransportVersions.V_7_15_0)) {
             encodedIn = CompressorFactory.COMPRESSOR.threadLocalInputStream(encodedIn);
         }
         try (StreamInput in = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(encodedIn), registry)) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncTaskMaintenanceService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncTaskMaintenanceService.java
index 8ed679d7b225c..8a504c5d7ce20 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncTaskMaintenanceService.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/async/AsyncTaskMaintenanceService.java
@@ -162,7 +162,7 @@ synchronized void executeNextCleanup() {
     synchronized void scheduleNextCleanup() {
         if (isCleanupRunning) {
             try {
-                cancellable = threadPool.schedule(this::executeNextCleanup, delay, ThreadPool.Names.GENERIC);
+                cancellable = threadPool.schedule(this::executeNextCleanup, delay, threadPool.generic());
             } catch (EsRejectedExecutionException e) {
                 if (e.isExecutorShutdown()) {
                     logger.debug("failed to schedule next maintenance task; shutting down", e);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java
index 822b3829a4e94..f5620b557bf26 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java
@@ -8,6 +8,7 @@
 
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.cluster.AbstractNamedDiffable;
 import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.cluster.metadata.IndexAbstraction;
@@ -137,18 +138,14 @@ public String getWriteableName() {
 
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.MINIMUM_COMPATIBLE;
+        return TransportVersions.MINIMUM_COMPATIBLE;
     }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeMap(patterns, StreamOutput::writeString, (out1, value) -> value.writeTo(out1));
-        out.writeMapOfLists(followedLeaderIndexUUIDs, StreamOutput::writeString, StreamOutput::writeString);
-        out.writeMap(
-            headers,
-            StreamOutput::writeString,
-            (valOut, header) -> valOut.writeMap(header, StreamOutput::writeString, StreamOutput::writeString)
-        );
+        out.writeMap(patterns, StreamOutput::writeWriteable);
+        out.writeMap(followedLeaderIndexUUIDs, StreamOutput::writeStringCollection);
+        out.writeMap(headers, (valOut, header) -> valOut.writeMap(header, StreamOutput::writeString));
     }
 
     @Override
@@ -264,10 +261,10 @@ public AutoFollowPattern(
 
         public static AutoFollowPattern readFrom(StreamInput in) throws IOException {
             final String remoteCluster = in.readString();
-            final List<String> leaderIndexPatterns = in.readStringList();
+            final List<String> leaderIndexPatterns = in.readStringCollectionAsList();
             final String followIndexPattern = in.readOptionalString();
             final Settings settings;
-            if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_9_0)) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) {
                 settings = Settings.readSettingsFromStream(in);
             } else {
                 settings = Settings.EMPTY;
@@ -287,13 +284,13 @@ private AutoFollowPattern(
             this.leaderIndexPatterns = leaderIndexPatterns;
             this.followIndexPattern = followIndexPattern;
             this.settings = Objects.requireNonNull(settings);
-            if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_5_0)) {
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) {
                 this.active = in.readBoolean();
             } else {
                 this.active = true;
             }
-            if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_14_0)) {
-                this.leaderIndexExclusionPatterns = in.readStringList();
+            if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) {
+                this.leaderIndexExclusionPatterns = in.readStringCollectionAsList();
             } else {
                 this.leaderIndexExclusionPatterns = Collections.emptyList();
             }
@@ -352,14 +349,14 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeString(remoteCluster);
             out.writeStringCollection(leaderIndexPatterns);
             out.writeOptionalString(followIndexPattern);
-            if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_9_0)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) {
                 settings.writeTo(out);
             }
             super.writeTo(out);
-            if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_5_0)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) {
                 out.writeBoolean(active);
             }
-            if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_14_0)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) {
                 out.writeStringCollection(leaderIndexExclusionPatterns);
             }
         }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java
index da83545d3a327..14e652982e4fb 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowStats.java
@@ -133,11 +133,11 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeVLong(numberOfFailedFollowIndices);
         out.writeVLong(numberOfFailedRemoteClusterStateRequests);
         out.writeVLong(numberOfSuccessfulFollowIndices);
-        out.writeMap(recentAutoFollowErrors, StreamOutput::writeString, (out1, value) -> {
+        out.writeMap(recentAutoFollowErrors, (out1, value) -> {
             out1.writeZLong(value.v1());
             out1.writeException(value.v2());
         });
-        out.writeMap(autoFollowedClusters, StreamOutput::writeString, (out1, value) -> value.writeTo(out1));
+        out.writeMap(autoFollowedClusters, StreamOutput::writeWriteable);
     }
 
     public long getNumberOfFailedFollowIndices() {
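The `AutoFollowMetadata.writeTo` rewrite above leans on the same default as the earlier files: with string keys implied, each map needs only a value writer, and `writeMapOfLists` collapses into `writeMap` with `StreamOutput::writeStringCollection`. A sketch of the resulting shape (the standalone helper is hypothetical):

```java
import java.io.IOException;
import java.util.List;
import java.util.Map;

import org.elasticsearch.common.io.stream.StreamOutput;

// Hypothetical snippet mirroring the AutoFollowMetadata change: one value
// writer per map, including a nested writeMap for the headers-of-headers case.
final class MapWireFormatExample {
    static void write(
        StreamOutput out,
        Map<String, List<String>> followedIndexUUIDs,
        Map<String, Map<String, String>> headers
    ) throws IOException {
        out.writeMap(followedIndexUUIDs, StreamOutput::writeStringCollection);
        out.writeMap(headers, (o, header) -> o.writeMap(header, StreamOutput::writeString));
    }
}
```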
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/ShardFollowNodeTaskStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/ShardFollowNodeTaskStatus.java
index 4be42f37a9cdc..e984b4363b4df 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/ShardFollowNodeTaskStatus.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/ShardFollowNodeTaskStatus.java
@@ -8,7 +8,7 @@
 package org.elasticsearch.xpack.core.ccr;
 
 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -413,7 +413,7 @@ public ShardFollowNodeTaskStatus(final StreamInput in) throws IOException {
         this.writeBufferSizeInBytes = in.readVLong();
         this.followerMappingVersion = in.readVLong();
         this.followerSettingsVersion = in.readVLong();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) {
             this.followerAliasesVersion = in.readVLong();
         } else {
             this.followerAliasesVersion = 0L;
@@ -457,7 +457,7 @@ public void writeTo(final StreamOutput out) throws IOException {
         out.writeVLong(writeBufferSizeInBytes);
         out.writeVLong(followerMappingVersion);
         out.writeVLong(followerSettingsVersion);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) {
             out.writeVLong(followerAliasesVersion);
         }
         out.writeVLong(totalReadTimeMillis);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java
index 037e413c89b42..b6e771e07ac46 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowInfoAction.java
@@ -97,12 +97,12 @@ public List<FollowerInfo> getFollowInfos() {
 
         public Response(StreamInput in) throws IOException {
             super(in);
-            followInfos = in.readList(FollowerInfo::new);
+            followInfos = in.readCollectionAsList(FollowerInfo::new);
         }
 
         @Override
         public void writeTo(StreamOutput out) throws IOException {
-            out.writeList(followInfos);
+            out.writeCollection(followInfos);
         }
 
         @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowStatsAction.java
index ac7d27d7f28bc..726257910c9a5 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowStatsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/FollowStatsAction.java
@@ -64,13 +64,13 @@ public StatsResponses(
 
         public StatsResponses(StreamInput in) throws IOException {
             super(in);
-            statsResponse = in.readList(StatsResponse::new);
+            statsResponse = in.readCollectionAsList(StatsResponse::new);
         }
 
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             super.writeTo(out);
-            out.writeList(statsResponse);
+            out.writeCollection(statsResponse);
         }
 
         @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java
index cfe46f8161b1f..8a327735052b5 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/GetAutoFollowPatternAction.java
@@ -93,7 +93,7 @@ public Response(StreamInput in) throws IOException {
 
         @Override
         public void writeTo(StreamOutput out) throws IOException {
-            out.writeMap(autoFollowPatterns, StreamOutput::writeString, (out1, value) -> value.writeTo(out1));
+            out.writeMap(autoFollowPatterns, StreamOutput::writeWriteable);
         }
 
         @Override
out.writeMap(autoFollowPatterns, StreamOutput::writeWriteable); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java index 46ed48512d2e7..8154067e72b18 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutAutoFollowPatternAction.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.ccr.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -183,14 +183,14 @@ public Request(StreamInput in) throws IOException { super(in); name = in.readString(); remoteCluster = in.readString(); - leaderIndexPatterns = in.readStringList(); + leaderIndexPatterns = in.readStringCollectionAsList(); followIndexNamePattern = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_9_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) { settings = Settings.readSettingsFromStream(in); } parameters = new FollowParameters(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_14_0)) { - leaderIndexExclusionPatterns = in.readStringList(); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) { + leaderIndexExclusionPatterns = in.readStringCollectionAsList(); } } @@ -201,11 +201,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(remoteCluster); out.writeStringCollection(leaderIndexPatterns); out.writeOptionalString(followIndexNamePattern); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_9_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) { settings.writeTo(out); } parameters.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_14_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) { out.writeStringCollection(leaderIndexExclusionPatterns); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java index 710d37b9c6c6f..0d8c120303cb5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.ccr.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; @@ -193,12 +193,12 @@ public Request(StreamInput in) throws IOException { this.remoteCluster = in.readString(); this.leaderIndex = in.readString(); this.followerIndex = in.readString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_9_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) { this.settings = Settings.readSettingsFromStream(in); } this.parameters = new FollowParameters(in); waitForActiveShards(ActiveShardCount.readFrom(in)); - if 
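Nearly every Writeable in this diff repeats the same compatibility idiom, with the only textual change being that the static version constants now live on a dedicated `TransportVersions` holder class while `TransportVersion` remains the value type. The idiom is worth spelling out once: a field added in a later release is guarded by the same constant on both sides of the wire, with a default on the read side for older peers. A condensed sketch, modeled on the exclusion-patterns field above:

    // read side: an older sender never wrote the field, so fall back to a default
    if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) {
        this.leaderIndexExclusionPatterns = in.readStringCollectionAsList();
    } else {
        this.leaderIndexExclusionPatterns = Collections.emptyList();
    }

    // write side: never send the field to a peer that cannot parse it
    if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) {
        out.writeStringCollection(leaderIndexExclusionPatterns);
    }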
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java
index 710d37b9c6c6f..0d8c120303cb5 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/PutFollowAction.java
@@ -7,7 +7,7 @@

 package org.elasticsearch.xpack.core.ccr.action;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.ActionType;
@@ -193,12 +193,12 @@ public Request(StreamInput in) throws IOException {
            this.remoteCluster = in.readString();
            this.leaderIndex = in.readString();
            this.followerIndex = in.readString();
-           if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_9_0)) {
+           if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) {
                this.settings = Settings.readSettingsFromStream(in);
            }
            this.parameters = new FollowParameters(in);
            waitForActiveShards(ActiveShardCount.readFrom(in));
-           if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+           if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
                this.dataStreamName = in.readOptionalString();
            }
        }
@@ -209,12 +209,12 @@ public void writeTo(StreamOutput out) throws IOException {
            out.writeString(remoteCluster);
            out.writeString(leaderIndex);
            out.writeString(followerIndex);
-           if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_9_0)) {
+           if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_9_0)) {
                settings.writeTo(out);
            }
            parameters.writeTo(out);
            waitForActiveShards.writeTo(out);
-           if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+           if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
                out.writeOptionalString(this.dataStreamName);
            }
        }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ShardFollowTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ShardFollowTask.java
index b1d6c5583afa2..4b2b840d6dfdc 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ShardFollowTask.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/action/ShardFollowTask.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.ccr.action;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -152,7 +153,7 @@ public void writeTo(StreamOutput out) throws IOException {
        followShardId.writeTo(out);
        leaderShardId.writeTo(out);
        super.writeTo(out);
-       out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString);
+       out.writeMap(headers, StreamOutput::writeString);
    }

    public static ShardFollowTask fromXContent(XContentParser parser) {
@@ -197,6 +198,6 @@ public String toString() {
    @Override
    public TransportVersion getMinimalSupportedVersion() {
-       return TransportVersion.MINIMUM_COMPATIBLE;
+       return TransportVersions.MINIMUM_COMPATIBLE;
    }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java
index 9fd73b660dd48..d411512275fc1 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamFeatureSetUsage.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.datastreams;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -40,7 +41,7 @@ public void writeTo(StreamOutput out) throws IOException {
    @Override
    public TransportVersion getMinimalSupportedVersion() {
-       return TransportVersion.V_7_9_0;
+       return TransportVersions.V_7_9_0;
    }

    @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java
index e077334b20be6..edac3498ca4e4 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/datastreams/DataStreamLifecycleFeatureSetUsage.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.datastreams;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -47,7 +48,7 @@ public void writeTo(StreamOutput out) throws IOException {
    @Override
    public TransportVersion getMinimalSupportedVersion() {
-       return TransportVersion.V_8_500_010;
+       return TransportVersions.V_8_500_020;
    }

    @Override
@@ -111,7 +112,7 @@ public LifecycleStats(
    }

    public static LifecycleStats read(StreamInput in) throws IOException {
-       if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) {
+       if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
            return new LifecycleStats(in.readVLong(), in.readVLong(), in.readVLong(), in.readDouble(), in.readBoolean());
        } else {
            return INITIAL;
@@ -120,7 +121,7 @@ public static LifecycleStats read(StreamInput in) throws IOException {
    @Override
    public void writeTo(StreamOutput out) throws IOException {
-       if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) {
+       if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) {
            out.writeVLong(dataStreamsWithLifecyclesCount);
            out.writeVLong(minRetentionMillis);
            out.writeVLong(maxRetentionMillis);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java
index 5a7baa444b8c9..2949ddfdaf5f6 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleIndexerAction.java
@@ -6,7 +6,7 @@
  */
 package org.elasticsearch.xpack.core.downsample;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.downsample.DownsampleAction;
@@ -69,7 +69,7 @@ public Request() {}
        public Request(StreamInput in) throws IOException {
            super(in);
-           if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_030) && in.readBoolean()) {
+           if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_030) && in.readBoolean()) {
                this.indexStartTimeMillis = in.readVLong();
                this.indexEndTimeMillis = in.readVLong();
            } else {
@@ -132,7 +132,7 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId,
        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
-           if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_030)) {
+           if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_030)) {
                out.writeBoolean(true);
                out.writeVLong(indexStartTimeMillis);
                out.writeVLong(indexEndTimeMillis);
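The feature-usage and metadata classes here pin `getMinimalSupportedVersion()` to the oldest transport version that may receive them, with `MINIMUM_COMPATIBLE` marking payloads that must survive a full rolling upgrade from the previous major. Note that `DataStreamLifecycleFeatureSetUsage` is not a one-to-one rename: `V_8_500_010` becomes `V_8_500_020` (and `V_8_500_012` becomes `V_8_500_020` in `FlushJobAction` further down), which reads like unreleased intra-8.x constants being collapsed as part of the move to `TransportVersions`; that is only safe while no released build ever negotiated the dropped values. A sketch of the override idiom (the class name is illustrative; real subclasses also carry a stream constructor and xContent output):

    public class ExampleUsage extends XPackFeatureSet.Usage {
        public ExampleUsage(boolean available, boolean enabled) {
            super("example", available, enabled);
        }

        @Override
        public TransportVersion getMinimalSupportedVersion() {
            // oldest peer version this payload may be serialized to
            return TransportVersions.V_7_9_0;
        }
    }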
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardStatus.java
index b19afa5ee8c74..3190fb095cbca 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardStatus.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/downsample/DownsampleShardStatus.java
@@ -7,7 +7,7 @@
 package org.elasticsearch.xpack.core.downsample;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -144,7 +144,7 @@ public DownsampleShardStatus(StreamInput in) throws IOException {
        numSent = in.readLong();
        numIndexed = in.readLong();
        numFailed = in.readLong();
-       if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_030) && in.readBoolean()) {
+       if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_030) && in.readBoolean()) {
            totalShardDocCount = in.readVLong();
            lastSourceTimestamp = in.readVLong();
            lastTargetTimestamp = in.readVLong();
@@ -254,7 +254,7 @@ public void writeTo(StreamOutput out) throws IOException {
        out.writeLong(numSent);
        out.writeLong(numIndexed);
        out.writeLong(numFailed);
-       if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_030)) {
+       if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_030)) {
            out.writeBoolean(true);
            out.writeVLong(totalShardDocCount);
            out.writeVLong(lastSourceTimestamp);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichFeatureSetUsage.java
index 8b5d85ec604be..ab058909761d7 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichFeatureSetUsage.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.enrich;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.xpack.core.XPackFeatureSet;
 import org.elasticsearch.xpack.core.XPackField;
@@ -26,6 +27,6 @@ public EnrichFeatureSetUsage(StreamInput input) throws IOException {
    @Override
    public TransportVersion getMinimalSupportedVersion() {
-       return TransportVersion.V_7_5_0;
+       return TransportVersions.V_7_5_0;
    }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichMetadata.java
index 6908c4f9f72d6..0cd5d617752f4 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichMetadata.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichMetadata.java
@@ -8,6 +8,7 @@

 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.cluster.AbstractNamedDiffable;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -83,7 +84,7 @@ public EnumSet<Metadata.XContentContext> context() {
    @Override
    public TransportVersion getMinimalSupportedVersion() {
-       return TransportVersion.V_7_5_0;
+       return TransportVersions.V_7_5_0;
    }

    @Override
@@ -93,7 +94,7 @@ public String getWriteableName() {
    @Override
    public void writeTo(StreamOutput out) throws IOException {
-       out.writeMap(policies, StreamOutput::writeString, (out1, value) -> value.writeTo(out1));
+       out.writeMap(policies, StreamOutput::writeWriteable);
    }

    @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichPolicy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichPolicy.java
index 52a15653d4124..600e065900d30 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichPolicy.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichPolicy.java
@@ -114,9 +114,9 @@ public EnrichPolicy(StreamInput in) throws IOException {
        this(
            in.readString(),
            in.readOptionalWriteable(QuerySource::new),
-           in.readStringList(),
+           in.readStringCollectionAsList(),
            in.readString(),
-           in.readStringList(),
+           in.readStringCollectionAsList(),
            Version.readVersion(in)
        );
    }
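The enrich hunks collect the plain-collection renames in one place: `readStringList` becomes `readStringCollectionAsList`, `readList` becomes `readCollectionAsList`, and `writeList` becomes `writeCollection`. These are renames, not re-encodings; the wire format stays a vint length prefix followed by the elements. A round-trip sketch using types that appear in this diff:

    // write: length prefix, then each element's writeTo
    out.writeCollection(policies);
    // read: the reader reference is invoked once per element
    List<EnrichPolicy.NamedPolicy> policies = in.readCollectionAsList(EnrichPolicy.NamedPolicy::new);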
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/EnrichStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/EnrichStatsAction.java
index 9f66164270884..c3b6253cf4def 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/EnrichStatsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/EnrichStatsAction.java
@@ -6,7 +6,7 @@
  */
 package org.elasticsearch.xpack.core.enrich.action;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.action.ActionType;
@@ -60,9 +60,9 @@ public Response(List<ExecutingPolicy> executingPolicies, List<CoordinatorStats>
        public Response(StreamInput in) throws IOException {
            super(in);
-           executingPolicies = in.readList(ExecutingPolicy::new);
-           coordinatorStats = in.readList(CoordinatorStats::new);
-           cacheStats = in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0) ? in.readList(CacheStats::new) : null;
+           executingPolicies = in.readCollectionAsList(ExecutingPolicy::new);
+           coordinatorStats = in.readCollectionAsList(CoordinatorStats::new);
+           cacheStats = in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0) ? in.readCollectionAsList(CacheStats::new) : null;
        }

        public List<ExecutingPolicy> getExecutingPolicies() {
@@ -79,10 +79,10 @@ public List<CacheStats> getCacheStats() {
        @Override
        public void writeTo(StreamOutput out) throws IOException {
-           out.writeList(executingPolicies);
-           out.writeList(coordinatorStats);
-           if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) {
-               out.writeList(cacheStats);
+           out.writeCollection(executingPolicies);
+           out.writeCollection(coordinatorStats);
+           if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) {
+               out.writeCollection(cacheStats);
            }
        }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/ExecuteEnrichPolicyStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/ExecuteEnrichPolicyStatus.java
index 89f9819409ca3..9b1eae2434d56 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/ExecuteEnrichPolicyStatus.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/ExecuteEnrichPolicyStatus.java
@@ -6,7 +6,7 @@
  */
 package org.elasticsearch.xpack.core.enrich.action;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.tasks.Task;
@@ -46,7 +46,7 @@ public ExecuteEnrichPolicyStatus(ExecuteEnrichPolicyStatus status, String step)
    public ExecuteEnrichPolicyStatus(StreamInput in) throws IOException {
        this.phase = in.readString();
-       this.step = in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0) ? in.readOptionalString() : null;
+       this.step = in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0) ? in.readOptionalString() : null;
    }

    public String getPhase() {
@@ -69,7 +69,7 @@ public String getWriteableName() {
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(phase);
-       if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) {
+       if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) {
            out.writeOptionalString(step);
        }
    }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/GetEnrichPolicyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/GetEnrichPolicyAction.java
index 7396d22f184c3..d7428ce2e4a26 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/GetEnrichPolicyAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/action/GetEnrichPolicyAction.java
@@ -48,7 +48,7 @@ public Request(String[] names) {
        public Request(StreamInput in) throws IOException {
            super(in);
-           this.names = in.readStringList();
+           this.names = in.readStringCollectionAsList();
        }

        @Override
@@ -94,12 +94,12 @@ public Response(Map<String, EnrichPolicy> policies) {
        }

        public Response(StreamInput in) throws IOException {
-           policies = in.readList(EnrichPolicy.NamedPolicy::new);
+           policies = in.readCollectionAsList(EnrichPolicy.NamedPolicy::new);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
-           out.writeList(policies);
+           out.writeCollection(policies);
        }

        @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java
index 77ebc85f9a1e4..0f384ef2a66fa 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/eql/EqlFeatureSetUsage.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.eql;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xcontent.XContentBuilder;
@@ -53,7 +54,7 @@ public void writeTo(StreamOutput out) throws IOException {
    @Override
    public TransportVersion getMinimalSupportedVersion() {
-       return TransportVersion.V_7_9_0;
+       return TransportVersions.V_7_9_0;
    }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/EsqlFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/EsqlFeatureSetUsage.java
index 6d191cc4cab3c..4f68ff1db6033 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/EsqlFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/esql/EsqlFeatureSetUsage.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.esql;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xcontent.XContentBuilder;
@@ -62,7 +63,7 @@ public void writeTo(StreamOutput out) throws IOException {
    @Override
    public TransportVersion getMinimalSupportedVersion() {
-       return TransportVersion.V_8_500_062;
+       return TransportVersions.V_8_500_062;
    }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsage.java
index a5976c14d67c8..a8702560e4804 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/frozen/FrozenIndicesFeatureSetUsage.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.core.frozen;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xcontent.XContentBuilder;
@@ -27,7 +28,7 @@ public FrozenIndicesFeatureSetUsage(StreamInput input) throws IOException {
    @Override
    public TransportVersion getMinimalSupportedVersion() {
-       return TransportVersion.V_7_4_0;
+       return TransportVersions.V_7_4_0;
    }

    @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/GraphFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/GraphFeatureSetUsage.java
index 9576be5c6ddbb..2ac1c11ce9147 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/GraphFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/graph/GraphFeatureSetUsage.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.core.graph;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.xpack.core.XPackFeatureSet;
 import org.elasticsearch.xpack.core.XPackField;
@@ -25,7 +26,7 @@ public GraphFeatureSetUsage(boolean available, boolean enabled) {
    @Override
    public TransportVersion getMinimalSupportedVersion() {
-       return TransportVersion.V_7_0_0;
+       return TransportVersions.V_7_0_0;
    }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocateAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocateAction.java
index 6f0e41e89ca9f..04a04b5ef4f41 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocateAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AllocateAction.java
@@ -6,7 +6,7 @@
  */
 package org.elasticsearch.xpack.core.ilm;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
@@ -123,7 +123,7 @@ public AllocateAction(
    public AllocateAction(StreamInput in) throws IOException {
        this(
            in.readOptionalVInt(),
-           in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0) ? in.readOptionalInt() : null,
+           in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0) ? in.readOptionalInt() : null,
            (Map<String, String>) in.readGenericValue(),
            (Map<String, String>) in.readGenericValue(),
            (Map<String, String>) in.readGenericValue()
@@ -153,7 +153,7 @@ public Map<String, String> getRequire() {
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeOptionalVInt(numberOfReplicas);
-       if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) {
+       if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) {
            out.writeOptionalInt(totalShardsPerNode);
        }
        out.writeGenericValue(include);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DataTierMigrationRoutedStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DataTierMigrationRoutedStep.java
index 45f61080d7f68..855b579e8843b 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DataTierMigrationRoutedStep.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DataTierMigrationRoutedStep.java
@@ -56,7 +56,8 @@ public Result isConditionMet(Index index, ClusterState clusterState) {
        Optional<String> availableDestinationTier = DataTierAllocationDecider.preferredAvailableTier(
            preferredTierConfiguration,
            clusterState.getNodes(),
-           DesiredNodes.latestFromClusterState(clusterState)
+           DesiredNodes.latestFromClusterState(clusterState),
+           clusterState.metadata().nodeShutdowns()
        );

        if (ActiveShardCount.ALL.enoughShardsActive(clusterState, index.getName()) == false) {
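The `DataTierMigrationRoutedStep` hunk is one of the few behavioral edits in this section (its twin in `WaitForDataTierStep` appears below): `DataTierAllocationDecider.preferredAvailableTier` now also receives the cluster's node-shutdown metadata, presumably so that a tier whose remaining nodes are all shutting down stops counting as an available destination. The new call shape, exactly as used here:

    Optional<String> availableDestinationTier = DataTierAllocationDecider.preferredAvailableTier(
        preferredTierConfiguration,
        clusterState.getNodes(),
        DesiredNodes.latestFromClusterState(clusterState),
        clusterState.metadata().nodeShutdowns()   // new argument
    );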
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java
index f3bcf95ad35d4..ca47453cbf563 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DownsampleAction.java
@@ -6,7 +6,7 @@
  */
 package org.elasticsearch.xpack.core.ilm;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.downsample.DownsampleConfig;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
@@ -85,7 +85,7 @@ public DownsampleAction(final DateHistogramInterval fixedInterval, final TimeVal
    public DownsampleAction(StreamInput in) throws IOException {
        this(
            new DateHistogramInterval(in),
-           in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_054)
+           in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_054)
                ? TimeValue.parseTimeValue(in.readString(), WAIT_TIMEOUT_FIELD.getPreferredName())
                : DEFAULT_WAIT_TIMEOUT
        );
@@ -94,7 +94,7 @@ public DownsampleAction(StreamInput in) throws IOException {
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        fixedInterval.writeTo(out);
-       if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_054)) {
+       if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_054)) {
            out.writeString(waitTimeout.getStringRep());
        } else {
            out.writeString(DEFAULT_WAIT_TIMEOUT.getStringRep());
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java
index 879db231a99e3..890045101c35c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java
@@ -7,7 +7,7 @@

 package org.elasticsearch.xpack.core.ilm;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -305,7 +305,7 @@ public IndexLifecycleExplainResponse(StreamInput in) throws IOException {
        repositoryName = in.readOptionalString();
        snapshotName = in.readOptionalString();
        shrinkIndexName = in.readOptionalString();
-       if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) {
+       if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) {
            indexCreationDate = in.readOptionalLong();
        } else {
            indexCreationDate = null;
@@ -352,7 +352,7 @@ public void writeTo(StreamOutput out) throws IOException {
        out.writeOptionalString(repositoryName);
        out.writeOptionalString(snapshotName);
        out.writeOptionalString(shrinkIndexName);
-       if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) {
+       if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) {
            out.writeOptionalLong(indexCreationDate);
        }
    }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java
index 19bc639a7e595..cc2e54e5be247 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleFeatureSetUsage.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.core.ilm;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.admin.indices.rollover.RolloverConditions;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -33,13 +34,13 @@ public class IndexLifecycleFeatureSetUsage extends XPackFeatureSet.Usage {
    public IndexLifecycleFeatureSetUsage(StreamInput input) throws IOException {
        super(input);
        if (input.readBoolean()) {
-           policyStats = input.readList(PolicyStats::new);
+           policyStats = input.readCollectionAsList(PolicyStats::new);
        }
    }

    @Override
    public TransportVersion getMinimalSupportedVersion() {
-       return TransportVersion.V_7_0_0;
+       return TransportVersions.V_7_0_0;
    }

    @Override
@@ -48,7 +49,7 @@ public void writeTo(StreamOutput out) throws IOException {
        boolean hasPolicyStats = policyStats != null;
        out.writeBoolean(hasPolicyStats);
        if (hasPolicyStats) {
-           out.writeList(policyStats);
+           out.writeCollection(policyStats);
        }
    }
@@ -111,7 +112,7 @@ public PolicyStats(StreamInput in) throws IOException {
    @Override
    public void writeTo(StreamOutput out) throws IOException {
-       out.writeMap(phaseStats, StreamOutput::writeString, (o, p) -> p.writeTo(o));
+       out.writeMap(phaseStats, StreamOutput::writeWriteable);
        out.writeVInt(indicesManaged);
    }
@@ -426,12 +427,12 @@ public ActionConfigStats(StreamInput in) throws IOException {
        this.setPriorityPriority = in.readOptionalVInt();
        this.shrinkMaxPrimaryShardSize = in.readOptionalWriteable(ByteSizeValue::readFrom);
        this.shrinkNumberOfShards = in.readOptionalVInt();
-       if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) {
+       if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) {
            this.rolloverMaxPrimaryShardDocs = in.readOptionalVLong();
        } else {
            this.rolloverMaxPrimaryShardDocs = null;
        }
-       if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+       if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
            this.rolloverMinAge = in.readOptionalTimeValue();
            this.rolloverMinDocs = in.readOptionalVLong();
            this.rolloverMinPrimaryShardSize = in.readOptionalWriteable(ByteSizeValue::readFrom);
@@ -457,10 +458,10 @@ public void writeTo(StreamOutput out) throws IOException {
        out.writeOptionalVInt(setPriorityPriority);
        out.writeOptionalWriteable(shrinkMaxPrimaryShardSize);
        out.writeOptionalVInt(shrinkNumberOfShards);
-       if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) {
+       if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) {
            out.writeOptionalVLong(rolloverMaxPrimaryShardDocs);
        }
-       if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+       if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
            out.writeOptionalTimeValue(rolloverMinAge);
            out.writeOptionalVLong(rolloverMinDocs);
            out.writeOptionalWriteable(rolloverMinPrimaryShardSize);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java
index 27265f33c136c..d4f2ecb36e95d 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.core.ilm;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.cluster.Diff;
 import org.elasticsearch.cluster.DiffableUtils;
 import org.elasticsearch.cluster.NamedDiff;
@@ -75,7 +76,7 @@ public IndexLifecycleMetadata(StreamInput in) throws IOException {
    @Override
    public void writeTo(StreamOutput out) throws IOException {
-       out.writeMap(policyMetadatas, StreamOutput::writeString, (o, v) -> v.writeTo(o));
+       out.writeMap(policyMetadatas, StreamOutput::writeWriteable);
        out.writeEnum(operationMode);
    }
@@ -113,7 +114,7 @@ public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params ignore
    @Override
    public TransportVersion getMinimalSupportedVersion() {
-       return TransportVersion.MINIMUM_COMPATIBLE;
+       return TransportVersions.MINIMUM_COMPATIBLE;
    }

    @Override
@@ -189,7 +190,7 @@ public String getWriteableName() {
    @Override
    public TransportVersion getMinimalSupportedVersion() {
-       return TransportVersion.MINIMUM_COMPATIBLE;
+       return TransportVersions.MINIMUM_COMPATIBLE;
    }

    static Diff<LifecyclePolicyMetadata> readLifecyclePolicyDiffFrom(StreamInput in) throws IOException {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadata.java
index a468058e82f5d..529eb16b668c3 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadata.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadata.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.ilm;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.Diff;
 import org.elasticsearch.cluster.NamedDiff;
@@ -126,7 +127,7 @@ public Iterator<? extends ToXContent> toXContentChunked(ToXContent.Params params
    @Override
    public TransportVersion getMinimalSupportedVersion() {
-       return TransportVersion.V_8_7_0;
+       return TransportVersions.V_8_7_0;
    }

    @Override
@@ -196,7 +197,7 @@ public String getWriteableName() {
        @Override
        public TransportVersion getMinimalSupportedVersion() {
-           return TransportVersion.V_8_7_0;
+           return TransportVersions.V_8_7_0;
        }
    }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicy.java
index 4beebbe971440..540a31b0116b0 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicy.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicy.java
@@ -135,7 +135,7 @@ public static LifecyclePolicy parse(XContentParser parser, String name) {
    public void writeTo(StreamOutput out) throws IOException {
        out.writeNamedWriteable(type);
        out.writeString(name);
-       out.writeMap(phases, StreamOutput::writeString, (o, val) -> val.writeTo(o));
+       out.writeMap(phases, StreamOutput::writeWriteable);
        out.writeGenericMap(this.metadata);
    }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java
index 961b06a943739..515941bce841a 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/RolloverAction.java
@@ -6,7 +6,7 @@
  */
 package org.elasticsearch.xpack.core.ilm;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.admin.indices.rollover.RolloverConditions;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.common.Strings;
@@ -88,10 +88,10 @@ public static RolloverAction read(StreamInput in) throws IOException {
        builder.addMaxPrimaryShardSizeCondition(in.readOptionalWriteable(ByteSizeValue::readFrom));
        builder.addMaxIndexAgeCondition(in.readOptionalTimeValue());
        builder.addMaxIndexDocsCondition(in.readOptionalVLong());
-       if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) {
+       if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) {
            builder.addMaxPrimaryShardDocsCondition(in.readOptionalVLong());
        }
-       if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+       if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
            builder.addMinIndexSizeCondition(in.readOptionalWriteable(ByteSizeValue::readFrom));
            builder.addMinPrimaryShardSizeCondition(in.readOptionalWriteable(ByteSizeValue::readFrom));
            builder.addMinIndexAgeCondition(in.readOptionalTimeValue());
@@ -106,7 +106,7 @@ public void writeTo(StreamOutput out) throws IOException {
        out.writeOptionalWriteable(conditions.getMaxSize());
        out.writeOptionalWriteable(conditions.getMaxPrimaryShardSize());
        out.writeOptionalTimeValue(conditions.getMaxAge());
-       if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) {
+       if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) {
            out.writeOptionalVLong(conditions.getMaxDocs());
            out.writeOptionalVLong(conditions.getMaxPrimaryShardDocs());
        } else {
@@ -117,7 +117,7 @@ public void writeTo(StreamOutput out) throws IOException {
            out.writeOptionalVLong(conditions.getMaxDocs());
        }
    }
-       if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+       if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
            out.writeOptionalWriteable(conditions.getMinSize());
            out.writeOptionalWriteable(conditions.getMinPrimaryShardSize());
            out.writeOptionalTimeValue(conditions.getMinAge());
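The `RolloverAction` write path above shows the other half of version gating: when a peer is too old for the new layout, the writer must still emit bytes the old reader can parse. Condensed from the hunks above (max-primary-shard-docs joined max-docs in 8.2, so older peers only get the legacy slot):

    if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) {
        out.writeOptionalVLong(conditions.getMaxDocs());
        out.writeOptionalVLong(conditions.getMaxPrimaryShardDocs());
    } else {
        // pre-8.2 readers expect only the single max-docs slot here
        out.writeOptionalVLong(conditions.getMaxDocs());
    }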
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForDataTierStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForDataTierStep.java
index e326f591c64cd..923b57988b415 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForDataTierStep.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForDataTierStep.java
@@ -37,7 +37,8 @@ public Result isConditionMet(Index index, ClusterState clusterState) {
        boolean present = DataTierAllocationDecider.preferredAvailableTier(
            DataTier.parseTierList(tierPreference),
            clusterState.nodes(),
-           DesiredNodes.latestFromClusterState(clusterState)
+           DesiredNodes.latestFromClusterState(clusterState),
+           clusterState.metadata().nodeShutdowns()
        ).isPresent();
        SingleMessageFieldInfo info = present
            ? null
            : new SingleMessageFieldInfo("no nodes for tiers [" + tierPreference + "] available");
        return new Result(present, info);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/GetLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/GetLifecycleAction.java
index abfbbe975a032..ffa8cadee77b2 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/GetLifecycleAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/GetLifecycleAction.java
@@ -45,7 +45,7 @@ public static class Response extends ActionResponse implements ChunkedToXContent
        public Response(StreamInput in) throws IOException {
            super(in);
-           this.policies = in.readList(LifecyclePolicyResponseItem::new);
+           this.policies = in.readCollectionAsList(LifecyclePolicyResponseItem::new);
        }

        public Response(List<LifecyclePolicyResponseItem> policies) {
@@ -58,7 +58,7 @@ public List<LifecyclePolicyResponseItem> getPolicies() {
        @Override
        public void writeTo(StreamOutput out) throws IOException {
-           out.writeList(policies);
+           out.writeCollection(policies);
        }

        @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/RemoveIndexLifecyclePolicyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/RemoveIndexLifecyclePolicyAction.java
index b3cda42b57561..4fe4861ce3455 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/RemoveIndexLifecyclePolicyAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/action/RemoveIndexLifecyclePolicyAction.java
@@ -52,7 +52,7 @@ public static class Response extends ActionResponse implements ToXContentObject
        public Response(StreamInput in) throws IOException {
            super(in);
-           failedIndexes = in.readStringList();
+           failedIndexes = in.readStringCollectionAsList();
        }

        public Response(List<String> failedIndexes) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java
index 4baf686c90dde..54b9fe7d76a85 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexer.java
@@ -77,16 +77,16 @@ static class ScheduledRunnable {
            // with wrapping the command in RunOnce we ensure the command isn't executed twice, e.g. if the
            // future is already running and cancel returns true
            this.command = new RunOnce(command);
-           this.scheduled = threadPool.schedule(command::run, delay, ThreadPool.Names.GENERIC);
+           this.scheduled = threadPool.schedule(command, delay, threadPool.generic());
        }

        public void reschedule(TimeValue delay) {
            // note: cancel return true if the runnable is currently executing
            if (scheduled.cancel()) {
                if (delay.duration() > 0) {
-                   scheduled = threadPool.schedule(command::run, delay, ThreadPool.Names.GENERIC);
+                   scheduled = threadPool.schedule(command, delay, threadPool.generic());
                } else {
-                   threadPool.executor(ThreadPool.Names.GENERIC).execute(command::run);
+                   threadPool.generic().execute(command);
                }
            }
        }
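The `AsyncTwoPhaseIndexer` hunks track a `ThreadPool` API change rather than a serialization one: `schedule` now takes the `Runnable` itself plus an `Executor` handle instead of a `command::run` wrapper plus an executor name, and `threadPool.generic()` replaces `threadPool.executor(ThreadPool.Names.GENERIC)`. Side by side:

    // before: executor addressed by string name, runnable re-wrapped on every call
    threadPool.schedule(command::run, delay, ThreadPool.Names.GENERIC);
    // after: the runnable and the executor object are passed directly
    threadPool.schedule(command, delay, threadPool.generic());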
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java
index 1bdc35962f7fe..a13cdf1966811 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/indexing/IndexerJobStats.java
@@ -6,7 +6,7 @@
  */
 package org.elasticsearch.xpack.core.indexing;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -84,7 +84,7 @@ public IndexerJobStats(StreamInput in) throws IOException {
        this.indexFailures = in.readVLong();
        this.searchFailures = in.readVLong();

-       if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) {
+       if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_7_0)) {
            this.processingTime = in.readVLong();
            this.processingTotal = in.readVLong();
        }
@@ -205,7 +205,7 @@ public void writeTo(StreamOutput out) throws IOException {
        out.writeVLong(searchTotal);
        out.writeVLong(indexFailures);
        out.writeVLong(searchFailures);
-       if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_7_0)) {
+       if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_7_0)) {
            out.writeVLong(processingTime);
            out.writeVLong(processingTotal);
        }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/logstash/LogstashFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/logstash/LogstashFeatureSetUsage.java
index 3152a780730f8..a83b8439aa612 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/logstash/LogstashFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/logstash/LogstashFeatureSetUsage.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.core.logstash;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.xpack.core.XPackFeatureSet;
 import org.elasticsearch.xpack.core.XPackField;
@@ -25,7 +26,7 @@ public LogstashFeatureSetUsage(boolean available) {
    @Override
    public TransportVersion getMinimalSupportedVersion() {
-       return TransportVersion.V_7_0_0;
+       return TransportVersions.V_7_0_0;
    }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java
index 515def6fb56e8..ef2e5324678a4 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MachineLearningFeatureSetUsage.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.core.ml;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xcontent.XContentBuilder;
@@ -65,7 +66,7 @@ public MachineLearningFeatureSetUsage(StreamInput in) throws IOException {
    @Override
    public TransportVersion getMinimalSupportedVersion() {
-       return TransportVersion.V_7_0_0;
+       return TransportVersions.V_7_0_0;
    }

    @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java
index db0509801acf3..e791f5f474f5c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.core.ml;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.Diff;
 import org.elasticsearch.cluster.DiffableUtils;
@@ -66,7 +67,7 @@ public boolean isResetMode() {
    @Override
    public TransportVersion getMinimalSupportedVersion() {
-       return TransportVersion.MINIMUM_COMPATIBLE;
+       return TransportVersions.MINIMUM_COMPATIBLE;
    }

    @Override
@@ -85,7 +86,7 @@ public Diff<Metadata.Custom> diff(Metadata.Custom previousState) {
    }

    public MlMetadata(StreamInput in) throws IOException {
-       if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+       if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) {
            int size = in.readVInt();
            for (int i = 0; i < size; i++) {
                in.readString();
@@ -103,7 +104,7 @@ public MlMetadata(StreamInput in) throws IOException {
    @Override
    public void writeTo(StreamOutput out) throws IOException {
-       if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+       if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) {
            writeMap(Collections.emptySortedMap(), out);
            writeMap(Collections.emptySortedMap(), out);
        }
@@ -138,7 +139,7 @@ public static class MlMetadataDiff implements NamedDiff<Metadata.Custom> {
        }

        public MlMetadataDiff(StreamInput in) throws IOException {
-           if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+           if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) {
                DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), Job::new, MlMetadataDiff::readJobDiffFrom);
                DiffableUtils.readJdkMapDiff(
                    in,
@@ -163,7 +164,7 @@ public Metadata.Custom apply(Metadata.Custom part) {
        @Override
        public void writeTo(StreamOutput out) throws IOException {
-           if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) {
+           if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) {
                SortedMap<String, Job> jobs = Collections.emptySortedMap();
                DiffableUtils.diff(jobs, jobs, DiffableUtils.getStringKeySerializer()).writeTo(out);
                SortedMap<String, DatafeedConfig> datafeeds = Collections.emptySortedMap();
@@ -180,7 +181,7 @@ public String getWriteableName() {
        @Override
        public TransportVersion getMinimalSupportedVersion() {
-           return TransportVersion.MINIMUM_COMPATIBLE;
+           return TransportVersions.MINIMUM_COMPATIBLE;
        }

        static Diff<Job> readJobDiffFrom(StreamInput in) throws IOException {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java
index 37cf558c2b8c2..a101865c6c175 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteJobAction.java
@@ -6,7 +6,7 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.support.master.AcknowledgedRequest;
@@ -52,7 +52,7 @@ public Request(StreamInput in) throws IOException {
            super(in);
            jobId = in.readString();
            force = in.readBoolean();
-           if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) {
+           if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) {
                deleteUserAnnotations = in.readBoolean();
            } else {
                deleteUserAnnotations = false;
@@ -110,7 +110,7 @@ public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeString(jobId);
            out.writeBoolean(force);
-           if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) {
+           if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) {
                out.writeBoolean(deleteUserAnnotations);
            }
        }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteTrainedModelAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteTrainedModelAction.java
index ae1d787e8a11a..fb71884c07c4b 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteTrainedModelAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/DeleteTrainedModelAction.java
@@ -6,7 +6,7 @@
  */
 package org.elasticsearch.xpack.core.ml.action;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionType;
 import org.elasticsearch.action.support.master.AcknowledgedRequest;
@@ -41,7 +41,7 @@ public static class Request extends AcknowledgedRequest<Request> implements ToXC
        public Request(StreamInput in) throws IOException {
            super(in);
            id = in.readString();
-           if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) {
+           if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) {
                force = in.readBoolean();
            } else {
                force = false;
@@ -88,7 +88,7 @@ public boolean equals(Object o) {
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeString(id);
-           if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) {
+           if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) {
                out.writeBoolean(force);
            }
        }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EstimateModelMemoryAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EstimateModelMemoryAction.java
index 0348db4a218fe..ec9200e6f2b46 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EstimateModelMemoryAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EstimateModelMemoryAction.java
@@ -87,8 +87,8 @@ public void writeTo(StreamOutput out) throws IOException {
            } else {
                out.writeBoolean(false);
            }
-           out.writeMap(overallCardinality, StreamOutput::writeString, StreamOutput::writeVLong);
-           out.writeMap(maxBucketCardinality, StreamOutput::writeString, StreamOutput::writeVLong);
+           out.writeMap(overallCardinality, StreamOutput::writeVLong);
+           out.writeMap(maxBucketCardinality, StreamOutput::writeVLong);
        }

        @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameAction.java
index d0793053b2d1f..60cecddf34f68 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/EvaluateDataFrameAction.java
@@ -194,7 +194,7 @@ public static class Response extends ActionResponse implements ToXContentObject
        public Response(StreamInput in) throws IOException {
            super(in);
            this.evaluationName = in.readString();
-           this.metrics = in.readNamedWriteableList(EvaluationMetricResult.class);
+           this.metrics = in.readNamedWriteableCollectionAsList(EvaluationMetricResult.class);
        }

        public Response(String evaluationName, List<EvaluationMetricResult> metrics) {
@@ -213,7 +213,7 @@ public List<EvaluationMetricResult> getMetrics() {
        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(evaluationName);
-           out.writeNamedWriteableList(metrics);
+           out.writeNamedWriteableCollection(metrics);
        }

        @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ExplainDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ExplainDataFrameAnalyticsAction.java
index 04a893918a470..ec969538b0733 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ExplainDataFrameAnalyticsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ExplainDataFrameAnalyticsAction.java
@@ -185,13 +185,13 @@ public Response(List<FieldSelection> fieldSelection, MemoryEstimation memoryEsti
        public Response(StreamInput in) throws IOException {
            super(in);
-           this.fieldSelection = in.readList(FieldSelection::new);
+           this.fieldSelection = in.readCollectionAsList(FieldSelection::new);
            this.memoryEstimation = new MemoryEstimation(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
-           out.writeList(fieldSelection);
+           out.writeCollection(fieldSelection);
            memoryEstimation.writeTo(out);
        }
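Polymorphic collections get the matching rename in `EvaluateDataFrameAction`: `readNamedWriteableList`/`writeNamedWriteableList` become `readNamedWriteableCollectionAsList`/`writeNamedWriteableCollection`. Unlike the plain variants, these prefix each element with its registered name so mixed concrete implementations round-trip through the interface type:

    out.writeNamedWriteableCollection(metrics);
    List<EvaluationMetricResult> metrics = in.readNamedWriteableCollectionAsList(EvaluationMetricResult.class);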
(in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { refreshRequired = in.readBoolean(); } } @@ -93,7 +93,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(advanceTime); out.writeOptionalString(skipTime); out.writeBoolean(waitForNormalization); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_012)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeBoolean(refreshRequired); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsAction.java index 07d2e4f442035..9c1f730be7c44 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDataFrameAnalyticsStatsAction.java @@ -75,7 +75,7 @@ public Request(StreamInput in) throws IOException { id = in.readString(); allowNoMatch = in.readBoolean(); pageParams = in.readOptionalWriteable(PageParams::new); - expandedIds = in.readStringList(); + expandedIds = in.readStringCollectionAsList(); } public void setExpandedIds(List expandedIds) { @@ -207,7 +207,7 @@ public Stats(StreamInput in) throws IOException { id = in.readString(); state = DataFrameAnalyticsState.fromStream(in); failureReason = in.readOptionalString(); - progress = in.readList(PhaseProgress::new); + progress = in.readCollectionAsList(PhaseProgress::new); dataCounts = new DataCounts(in); memoryUsage = new MemoryUsage(in); analysisStats = in.readOptionalNamedWriteable(AnalysisStats.class); @@ -312,7 +312,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(id); state.writeTo(out); out.writeOptionalString(failureReason); - out.writeList(progress); + out.writeCollection(progress); dataCounts.writeTo(out); memoryUsage.writeTo(out); out.writeOptionalNamedWriteable(analysisStats); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedRunningStateAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedRunningStateAction.java index 8b9aa5f0c0c34..413bc58e6332d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedRunningStateAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedRunningStateAction.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; @@ -54,7 +54,7 @@ public Request(List datafeedIds) { public Request(StreamInput in) throws IOException { super(in); - this.datafeedTaskIds = in.readSet(StreamInput::readString); + this.datafeedTaskIds = in.readCollectionAsSet(StreamInput::readString); } @Override @@ -100,7 +100,7 @@ public RunningState(boolean realTimeConfigured, boolean realTimeRunning, @Nullab public RunningState(StreamInput in) throws IOException { this.realTimeConfigured = in.readBoolean(); this.realTimeRunning = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { this.searchInterval = 
in.readOptionalWriteable(SearchInterval::new); } else { this.searchInterval = null; @@ -126,7 +126,7 @@ public int hashCode() { public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(realTimeConfigured); out.writeBoolean(realTimeRunning); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { out.writeOptionalWriteable(searchInterval); } } @@ -180,7 +180,7 @@ public Map getDatafeedRunningState() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeMap(datafeedRunningState, StreamOutput::writeString, (o, w) -> w.writeTo(o)); + out.writeMap(datafeedRunningState, StreamOutput::writeWriteable); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDeploymentStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDeploymentStatsAction.java index ea2f3b6908b2e..5f8ed83e1f891 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDeploymentStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDeploymentStatsAction.java @@ -50,7 +50,7 @@ public Request(String deploymentId) { public Request(StreamInput in) throws IOException { super(in); this.deploymentId = in.readString(); - this.expandedIds = in.readStringList(); + this.expandedIds = in.readStringCollectionAsList(); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java index 8ecef84b34066..e8b0041875b07 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetJobsStatsAction.java @@ -78,7 +78,7 @@ public Request(String jobId) { public Request(StreamInput in) throws IOException { super(in); jobId = in.readString(); - expandedJobsIds = in.readStringList(); + expandedJobsIds = in.readStringCollectionAsList(); allowNoMatch = in.readBoolean(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsAction.java index 27205908f46ae..0e58a5946a277 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsAction.java @@ -81,12 +81,12 @@ public Includes(Set includes) { } public Includes(StreamInput in) throws IOException { - this.includes = in.readSet(StreamInput::readString); + this.includes = in.readCollectionAsSet(StreamInput::readString); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeCollection(this.includes, StreamOutput::writeString); + out.writeStringCollection(this.includes); } public boolean isIncludeModelDefinition() { @@ -146,7 +146,7 @@ public Request(String id, List tags, Set includes) { public Request(StreamInput in) throws IOException { super(in); this.includes = new Includes(in); - this.tags = in.readStringList(); + this.tags = in.readStringCollectionAsList(); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsStatsAction.java 
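[note] The writeMap change in GetDatafeedRunningStateAction above is the String-keyed overload adopted across this diff: the explicit StreamOutput::writeString key writer is dropped, and the (o, w) -> w.writeTo(o) lambda becomes the StreamOutput::writeWriteable method reference. A sketch with a hypothetical wrapper; the reader side here assumes the two-argument readMap overload.

```java
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;
import java.util.Map;

class StateMap<T extends Writeable> {
    private final Map<String, T> byId;

    StateMap(StreamInput in, Writeable.Reader<T> reader) throws IOException {
        this.byId = in.readMap(StreamInput::readString, reader);
    }

    void writeTo(StreamOutput out) throws IOException {
        // was: out.writeMap(byId, StreamOutput::writeString, (o, w) -> w.writeTo(o));
        out.writeMap(byId, StreamOutput::writeWriteable);
    }
}
```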
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsStatsAction.java index f6a35efe637fc..11f2a4191afe8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsStatsAction.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -118,7 +118,7 @@ public TrainedModelStats( public TrainedModelStats(StreamInput in) throws IOException { modelId = in.readString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { modelSizeStats = in.readOptionalWriteable(TrainedModelSizeStats::new); } else { modelSizeStats = null; @@ -126,7 +126,7 @@ public TrainedModelStats(StreamInput in) throws IOException { ingestStats = IngestStats.read(in); pipelineCount = in.readVInt(); inferenceStats = in.readOptionalWriteable(InferenceStats::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { this.deploymentStats = in.readOptionalWriteable(AssignmentStats::new); } else { this.deploymentStats = null; @@ -182,13 +182,13 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(modelId); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { out.writeOptionalWriteable(modelSizeStats); } ingestStats.writeTo(out); out.writeVInt(pipelineCount); out.writeOptionalWriteable(inferenceStats); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { out.writeOptionalWriteable(deploymentStats); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferModelAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferModelAction.java index c5abe89e59c9c..fdc4040b0be49 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferModelAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferModelAction.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; @@ -147,20 +147,20 @@ public static Request forTextInput(String id, InferenceConfigUpdate update, List public Request(StreamInput in) throws IOException { super(in); this.id = in.readString(); - this.objectsToInfer = in.readImmutableList(StreamInput::readMap); + this.objectsToInfer = in.readCollectionAsImmutableList(StreamInput::readMap); this.update = in.readNamedWriteable(InferenceConfigUpdate.class); this.previouslyLicensed = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { + if 
(in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { this.inferenceTimeout = in.readTimeValue(); } else { this.inferenceTimeout = TimeValue.MAX_VALUE; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { - textInput = in.readOptionalStringList(); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { + textInput = in.readOptionalStringCollectionAsList(); } else { textInput = null; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { highPriority = in.readBoolean(); } } @@ -222,13 +222,13 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(objectsToInfer, StreamOutput::writeGenericMap); out.writeNamedWriteable(update); out.writeBoolean(previouslyLicensed); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { out.writeTimeValue(inferenceTimeout); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { out.writeOptionalStringCollection(textInput); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeBoolean(highPriority); } } @@ -316,7 +316,7 @@ public Response(List inferenceResults, String id, boolean isLi public Response(StreamInput in) throws IOException { super(in); - this.inferenceResults = Collections.unmodifiableList(in.readNamedWriteableList(InferenceResults.class)); + this.inferenceResults = Collections.unmodifiableList(in.readNamedWriteableCollectionAsList(InferenceResults.class)); this.isLicensed = in.readBoolean(); this.id = in.readOptionalString(); } @@ -335,7 +335,7 @@ public String getId() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeNamedWriteableList(inferenceResults); + out.writeNamedWriteableCollection(inferenceResults); out.writeBoolean(isLicensed); out.writeOptionalString(id); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentAction.java index 66219f1dbdc4b..916f09c84b6f5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentAction.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.tasks.BaseTasksRequest; @@ -145,14 +145,14 @@ public static Request forTextInput(String id, InferenceConfigUpdate update, List public Request(StreamInput in) throws IOException { super(in); id = in.readString(); - docs = in.readImmutableList(StreamInput::readMap); + docs = in.readCollectionAsImmutableList(StreamInput::readMap); update = in.readOptionalNamedWriteable(InferenceConfigUpdate.class); inferenceTimeout = in.readOptionalTimeValue(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { highPriority = in.readBoolean(); } - if 
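[note] InferModelAction above combines two of the renames: the optional string list reader (readOptionalStringList to readOptionalStringCollectionAsList) and a version gate, with a null fallback when the peer is too old to have sent the field. A minimal sketch; the wrapper class is hypothetical.

```java
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.Nullable;

import java.io.IOException;
import java.util.List;

class TextInputHolder {
    @Nullable
    private final List<String> textInput;

    TextInputHolder(StreamInput in) throws IOException {
        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) {
            // was: in.readOptionalStringList()
            textInput = in.readOptionalStringCollectionAsList();
        } else {
            textInput = null;  // field did not exist on the wire before 8.7
        }
    }

    void writeTo(StreamOutput out) throws IOException {
        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) {
            out.writeOptionalStringCollection(textInput);
        }
    }
}
```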
(in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { - textInput = in.readOptionalStringList(); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { + textInput = in.readOptionalStringCollectionAsList(); } else { textInput = null; } @@ -220,10 +220,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeCollection(docs, StreamOutput::writeGenericMap); out.writeOptionalNamedWriteable(update); out.writeOptionalTimeValue(inferenceTimeout); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { out.writeBoolean(highPriority); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { out.writeOptionalStringCollection(textInput); } } @@ -320,8 +320,8 @@ public Response(StreamInput in) throws IOException { super(in); // Multiple results added in 8.6.1 - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_6_1)) { - results = in.readNamedWriteableList(InferenceResults.class); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_6_1)) { + results = in.readNamedWriteableCollectionAsList(InferenceResults.class); } else { results = List.of(in.readNamedWriteable(InferenceResults.class)); } @@ -331,8 +331,8 @@ public Response(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_6_1)) { - out.writeNamedWriteableList(results); + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_6_1)) { + out.writeNamedWriteableCollection(results); } else { out.writeNamedWriteable(results.get(0)); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlMemoryAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlMemoryAction.java index 06b5418d5ee97..c6d4428d0e369 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlMemoryAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/MlMemoryAction.java @@ -318,12 +318,12 @@ public Response(ClusterName clusterName, List nodes, List readNodesFrom(StreamInput in) throws IOException { - return in.readList(MlMemoryStats::new); + return in.readCollectionAsList(MlMemoryStats::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java index 1b7819c38eab9..9fad95d49158e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/OpenJobAction.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.MasterNodeRequest; @@ -230,7 +231,7 @@ public String toString() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.MINIMUM_COMPATIBLE; + return TransportVersions.MINIMUM_COMPATIBLE; 
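[note] The V_8_6_1 branch in InferTrainedModelDeploymentAction.Response above is the dual-format case: newer nodes exchange a full list of results while older nodes only ever see a single element. A sketch of that branching, generic over the NamedWriteable type so no model-specific imports are assumed.

```java
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.List;

class ResultsHolder<T extends NamedWriteable> {
    private final List<T> results;

    ResultsHolder(StreamInput in, Class<T> categoryClass) throws IOException {
        // Multiple results were added in 8.6.1; older peers send exactly one.
        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_6_1)) {
            results = in.readNamedWriteableCollectionAsList(categoryClass);
        } else {
            results = List.of(in.readNamedWriteable(categoryClass));
        }
    }

    void writeTo(StreamOutput out) throws IOException {
        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_6_1)) {
            out.writeNamedWriteableCollection(results);
        } else {
            // Older receivers can only decode a single result.
            out.writeNamedWriteable(results.get(0));
        }
    }
}
```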
} @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java index b396e1be1af1f..4afad7a650db0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PostCalendarEventsAction.java @@ -68,7 +68,7 @@ public static Request parseRequest(String calendarId, XContentParser parser) thr public Request(StreamInput in) throws IOException { super(in); calendarId = in.readString(); - scheduledEvents = in.readList(ScheduledEvent::new); + scheduledEvents = in.readCollectionAsList(ScheduledEvent::new); } public Request(String calendarId, List scheduledEvents) { @@ -97,7 +97,7 @@ public ActionRequestValidationException validate() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(calendarId); - out.writeList(scheduledEvents); + out.writeCollection(scheduledEvents); } @Override @@ -124,7 +124,7 @@ public static class Response extends ActionResponse implements ToXContentObject public Response(StreamInput in) throws IOException { super(in); - in.readList(ScheduledEvent::new); + in.readCollectionAsList(ScheduledEvent::new); } public Response(List scheduledEvents) { @@ -133,7 +133,7 @@ public Response(List scheduledEvents) { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(scheduledEvents); + out.writeCollection(scheduledEvents); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDataFrameAnalyticsAction.java index ad634ff8d4b82..97cbc29b3cf44 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDataFrameAnalyticsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDataFrameAnalyticsAction.java @@ -143,7 +143,7 @@ public Response(List> featureValues) { public Response(StreamInput in) throws IOException { super(in); - this.featureValues = in.readList(StreamInput::readMap); + this.featureValues = in.readCollectionAsList(StreamInput::readMap); } public List> getFeatureValues() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java index 5faf4b545ef71..c63dd20651be4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PreviewDatafeedAction.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; @@ -85,7 +85,7 @@ public Request(StreamInput in) throws IOException { datafeedId = in.readString(); datafeedConfig = in.readOptionalWriteable(DatafeedConfig::new); jobConfig = in.readOptionalWriteable(Job.Builder::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { + if 
(in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { this.startTime = in.readOptionalLong(); this.endTime = in.readOptionalLong(); } else { @@ -163,7 +163,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(datafeedId); out.writeOptionalWriteable(datafeedConfig); out.writeOptionalWriteable(jobConfig); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { out.writeOptionalLong(startTime); out.writeOptionalLong(endTime); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelAction.java index affba6f6eafb3..5f81290261232 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelAction.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; @@ -82,7 +82,7 @@ public Request(StreamInput in) throws IOException { super(in); this.config = new TrainedModelConfig(in); this.deferDefinitionDecompression = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { this.waitForCompletion = in.readBoolean(); } else { this.waitForCompletion = false; @@ -122,7 +122,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); config.writeTo(out); out.writeBoolean(deferDefinitionDecompression); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeBoolean(waitForCompletion); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java index 444758962fcee..61b39e40a065c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartAction.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -91,7 +91,7 @@ public Request(StreamInput in) throws IOException { this.part = in.readVInt(); this.totalDefinitionLength = in.readVLong(); this.totalParts = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_043)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_043)) { this.allowOverwriting = in.readBoolean(); } else { this.allowOverwriting = false; @@ -148,7 +148,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(part); out.writeVLong(totalDefinitionLength); out.writeVInt(totalParts); - if 
(out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_043)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_043)) { out.writeBoolean(allowOverwriting); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java index 825e793cee135..71d4ebdcb6ea5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelVocabularyAction.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -80,18 +80,18 @@ public Request( public Request(StreamInput in) throws IOException { super(in); this.modelId = in.readString(); - this.vocabulary = in.readStringList(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { - this.merges = in.readStringList(); + this.vocabulary = in.readStringCollectionAsList(); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) { + this.merges = in.readStringCollectionAsList(); } else { this.merges = List.of(); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { - this.scores = in.readList(StreamInput::readDouble); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + this.scores = in.readCollectionAsList(StreamInput::readDouble); } else { this.scores = List.of(); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_043)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_043)) { this.allowOverwriting = in.readBoolean(); } else { this.allowOverwriting = false; @@ -133,13 +133,13 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(modelId); out.writeStringCollection(vocabulary); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) { out.writeStringCollection(merges); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeCollection(scores, StreamOutput::writeDouble); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_043)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_043)) { out.writeBoolean(allowOverwriting); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ResetJobAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ResetJobAction.java index 904d4274b7e03..d9b900cc465d5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ResetJobAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/ResetJobAction.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import 
org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -30,7 +31,7 @@ public class ResetJobAction extends ActionType { public static final String NAME = "cluster:admin/xpack/ml/job/reset"; public static final ResetJobAction INSTANCE = new ResetJobAction(); - public static final TransportVersion TRANSPORT_VERSION_INTRODUCED = TransportVersion.V_7_14_0; + public static final TransportVersion TRANSPORT_VERSION_INTRODUCED = TransportVersions.V_7_14_0; private ResetJobAction() { super(NAME, AcknowledgedResponse::readFrom); @@ -64,7 +65,7 @@ public Request(StreamInput in) throws IOException { super(in); jobId = in.readString(); skipJobStateValidation = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { deleteUserAnnotations = in.readBoolean(); } else { deleteUserAnnotations = false; @@ -76,7 +77,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(jobId); out.writeBoolean(skipJobStateValidation); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { out.writeBoolean(deleteUserAnnotations); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java index 37893ae274177..dd56eec10200b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDataFrameAnalyticsAction.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; @@ -146,7 +147,7 @@ public String toString() { public static class TaskParams implements PersistentTaskParams, MlTaskParams { public static final MlConfigVersion VERSION_INTRODUCED = MlConfigVersion.V_7_3_0; - public static final TransportVersion TRANSPORT_VERSION_INTRODUCED = TransportVersion.V_7_3_0; + public static final TransportVersion TRANSPORT_VERSION_INTRODUCED = TransportVersions.V_7_3_0; public static final Version VERSION_DESTINATION_INDEX_MAPPINGS_CHANGED = Version.V_7_10_0; public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java index 3b12cf1431258..c664cf97ba2a1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartDatafeedAction.java @@ -8,6 +8,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.ValidateActions; @@ -202,7 +203,7 @@ public DatafeedParams(StreamInput in) throws IOException { endTime = in.readOptionalLong(); timeout = 
TimeValue.timeValueMillis(in.readVLong()); jobId = in.readOptionalString(); - datafeedIndices = in.readStringList(); + datafeedIndices = in.readStringCollectionAsList(); indicesOptions = IndicesOptions.readIndicesOptions(in); } @@ -276,7 +277,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.MINIMUM_COMPATIBLE; + return TransportVersions.MINIMUM_COMPATIBLE; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java index fa219e233cfb2..b00dcfd731aee 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StartTrainedModelDeploymentAction.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.ml.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.MasterNodeRequest; @@ -155,16 +155,16 @@ public Request(StreamInput in) throws IOException { numberOfAllocations = in.readVInt(); threadsPerAllocation = in.readVInt(); queueCapacity = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { this.cacheSize = in.readOptionalWriteable(ByteSizeValue::readFrom); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { this.priority = in.readEnum(Priority.class); } else { this.priority = Priority.NORMAL; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { this.deploymentId = in.readString(); } else { this.deploymentId = modelId; @@ -253,13 +253,13 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(numberOfAllocations); out.writeVInt(threadsPerAllocation); out.writeVInt(queueCapacity); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { out.writeOptionalWriteable(cacheSize); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { out.writeEnum(priority); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeString(deploymentId); } } @@ -509,17 +509,17 @@ public TaskParams(StreamInput in) throws IOException { this.threadsPerAllocation = in.readVInt(); this.numberOfAllocations = in.readVInt(); this.queueCapacity = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { this.cacheSize = in.readOptionalWriteable(ByteSizeValue::readFrom); } else { this.cacheSize = null; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { this.priority = in.readEnum(Priority.class); } else { this.priority = Priority.NORMAL; } - if 
(in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { this.deploymentId = in.readString(); } else { this.deploymentId = modelId; @@ -575,13 +575,13 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(threadsPerAllocation); out.writeVInt(numberOfAllocations); out.writeVInt(queueCapacity); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { out.writeOptionalWriteable(cacheSize); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { out.writeEnum(priority); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeString(deploymentId); } if (out.getTransportVersion().onOrAfter(TrainedModelConfig.VERSION_ALLOCATION_MEMORY_ADDED)) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDataFrameAnalyticsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDataFrameAnalyticsAction.java index 44f458ef1d71c..c5ad45d1f6ce9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDataFrameAnalyticsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopDataFrameAnalyticsAction.java @@ -146,7 +146,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(id); out.writeBoolean(allowNoMatch); out.writeBoolean(force); - out.writeStringArray(expandedIds.toArray(new String[0])); + out.writeStringCollection(expandedIds); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopTrainedModelDeploymentAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopTrainedModelDeploymentAction.java index e0c8ad6fd90ea..5f62a48e761da 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopTrainedModelDeploymentAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/StopTrainedModelDeploymentAction.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.ml.action; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.tasks.BaseTasksRequest; import org.elasticsearch.action.support.tasks.BaseTasksResponse; @@ -40,10 +41,12 @@ public static class Request extends BaseTasksRequest implements ToXCont public static final ParseField ALLOW_NO_MATCH = new ParseField("allow_no_match"); public static final ParseField FORCE = new ParseField("force"); + public static final ParseField FINISH_PENDING_WORK = new ParseField("finish_pending_work"); private String id; private boolean allowNoMatch = true; private boolean force; + private boolean finishPendingWork; private static final ObjectParser PARSER = new ObjectParser<>(NAME, Request::new); @@ -51,6 +54,7 @@ public static class Request extends BaseTasksRequest implements ToXCont PARSER.declareString(Request::setId, TrainedModelConfig.MODEL_ID); PARSER.declareBoolean(Request::setAllowNoMatch, ALLOW_NO_MATCH); PARSER.declareBoolean(Request::setForce, FORCE); + PARSER.declareBoolean(Request::setFinishPendingWork, FINISH_PENDING_WORK); } public static Request parseRequest(String id, XContentParser parser) { @@ -74,6 +78,12 @@ public 
Request(StreamInput in) throws IOException { id = in.readString(); allowNoMatch = in.readBoolean(); force = in.readBoolean(); + + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_068)) { + finishPendingWork = in.readBoolean(); + } else { + finishPendingWork = false; + } } private Request() {} @@ -102,6 +112,14 @@ public boolean isForce() { return force; } + public boolean shouldFinishPendingWork() { + return finishPendingWork; + } + + public void setFinishPendingWork(boolean finishPendingWork) { + this.finishPendingWork = finishPendingWork; + } + @Override public boolean match(Task task) { return StartTrainedModelDeploymentAction.TaskMatcher.match(task, id); @@ -113,6 +131,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(id); out.writeBoolean(allowNoMatch); out.writeBoolean(force); + + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_068)) { + out.writeBoolean(finishPendingWork); + } } @Override @@ -121,13 +143,14 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.field(TrainedModelConfig.MODEL_ID.getPreferredName(), id); builder.field(ALLOW_NO_MATCH.getPreferredName(), allowNoMatch); builder.field(FORCE.getPreferredName(), force); + builder.field(FINISH_PENDING_WORK.getPreferredName(), finishPendingWork); builder.endObject(); return builder; } @Override public int hashCode() { - return Objects.hash(id, allowNoMatch, force); + return Objects.hash(id, allowNoMatch, force, finishPendingWork); } @Override @@ -136,7 +159,10 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; Request that = (Request) o; - return Objects.equals(id, that.id) && allowNoMatch == that.allowNoMatch && force == that.force; + return Objects.equals(id, that.id) + && allowNoMatch == that.allowNoMatch + && force == that.force + && finishPendingWork == that.finishPendingWork; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/TrainedModelCacheInfoAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/TrainedModelCacheInfoAction.java index b14d42088a26d..e3d6f578d2d1f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/TrainedModelCacheInfoAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/TrainedModelCacheInfoAction.java @@ -126,12 +126,12 @@ public Response(ClusterName clusterName, List nodes, List readNodesFrom(StreamInput in) throws IOException { - return in.readList(CacheInfo::new); + return in.readCollectionAsList(CacheInfo::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java index 184c92d1e36f2..e63aa95aeefd7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateFilterAction.java @@ -128,8 +128,8 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(filterId); out.writeOptionalString(description); - out.writeStringArray(addItems.toArray(new String[addItems.size()])); - out.writeStringArray(removeItems.toArray(new 
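[note] The new finish_pending_work flag above touches every layer a request field needs: a ParseField and ObjectParser declaration for the REST body, a version-gated wire read/write with a default for old nodes, toXContent output, and equals/hashCode participation. A compact sketch of the parser side with hypothetical names; the declareBoolean wiring mirrors the Request above.

```java
import org.elasticsearch.xcontent.ObjectParser;
import org.elasticsearch.xcontent.ParseField;
import org.elasticsearch.xcontent.XContentParser;

class ToyRequest {
    static final ParseField FINISH_PENDING_WORK = new ParseField("finish_pending_work");

    private static final ObjectParser<ToyRequest, Void> PARSER = new ObjectParser<>("toy_request", ToyRequest::new);

    static {
        // Maps the "finish_pending_work" body field onto the setter.
        PARSER.declareBoolean(ToyRequest::setFinishPendingWork, FINISH_PENDING_WORK);
    }

    private boolean finishPendingWork;

    void setFinishPendingWork(boolean finishPendingWork) {
        this.finishPendingWork = finishPendingWork;
    }

    static ToyRequest parse(XContentParser parser) {
        return PARSER.apply(parser, null);
    }
}
```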
String[removeItems.size()])); + out.writeStringCollection(addItems); + out.writeStringCollection(removeItems); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java index b6bd1fb0a2c0a..1c0f79a686390 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/UpdateProcessAction.java @@ -101,7 +101,7 @@ public Request(StreamInput in) throws IOException { modelPlotConfig = in.readOptionalWriteable(ModelPlotConfig::new); perPartitionCategorizationConfig = in.readOptionalWriteable(PerPartitionCategorizationConfig::new); if (in.readBoolean()) { - detectorUpdates = in.readList(JobUpdate.DetectorUpdate::new); + detectorUpdates = in.readCollectionAsList(JobUpdate.DetectorUpdate::new); } filter = in.readOptionalWriteable(MlFilter::new); updateScheduledEvents = in.readBoolean(); @@ -115,7 +115,7 @@ public void writeTo(StreamOutput out) throws IOException { boolean hasDetectorUpdates = detectorUpdates != null; out.writeBoolean(hasDetectorUpdates); if (hasDetectorUpdates) { - out.writeList(detectorUpdates); + out.writeCollection(detectorUpdates); } out.writeOptionalWriteable(filter); out.writeBoolean(updateScheduledEvents); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/Calendar.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/Calendar.java index 00d3f1ab59706..2fd5a6ddf9cf7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/Calendar.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/calendars/Calendar.java @@ -100,7 +100,7 @@ public String getDescription() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); - out.writeStringArray(jobIds.toArray(new String[jobIds.size()])); + out.writeStringCollection(jobIds); out.writeOptionalString(description); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/AggProvider.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/AggProvider.java index d5e39cf75ec27..d994647743634 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/AggProvider.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/AggProvider.java @@ -8,7 +8,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -117,7 +117,7 @@ static AggProvider fromStream(StreamInput in) throws IOException { in.readMap(), in.readOptionalWriteable(AggregatorFactories.Builder::new), in.readException(), - in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0) ? in.readBoolean() : false + in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0) ? 
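[note] The Calendar and UpdateFilterAction changes above are a pure cleanup: writing a List<String> directly instead of copying it into an array first. A one-method sketch; the wrapper is hypothetical.

```java
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.List;

class JobIdsWriter {
    void write(StreamOutput out, List<String> jobIds) throws IOException {
        // was: out.writeStringArray(jobIds.toArray(new String[jobIds.size()]));
        out.writeStringCollection(jobIds);  // same bytes, no intermediate array
    }
}
```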
in.readBoolean() : false ); } @@ -140,7 +140,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeGenericMap(aggs); out.writeOptionalWriteable(parsedAggs); out.writeException(parsingException); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { out.writeBoolean(rewroteAggs); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java index 486fd56fefbbf..7f5de886222ba 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedConfig.java @@ -279,7 +279,7 @@ public DatafeedConfig(StreamInput in) throws IOException { this.queryDelay = in.readOptionalTimeValue(); this.frequency = in.readOptionalTimeValue(); if (in.readBoolean()) { - this.indices = in.readImmutableList(StreamInput::readString); + this.indices = in.readCollectionAsImmutableList(StreamInput::readString); } else { this.indices = null; } @@ -289,7 +289,7 @@ public DatafeedConfig(StreamInput in) throws IOException { this.aggProvider = in.readOptionalWriteable(AggProvider::fromStream); if (in.readBoolean()) { - this.scriptFields = in.readImmutableList(SearchSourceBuilder.ScriptField::new); + this.scriptFields = in.readCollectionAsImmutableList(SearchSourceBuilder.ScriptField::new); } else { this.scriptFields = null; } @@ -529,13 +529,13 @@ public void writeTo(StreamOutput out) throws IOException { if (scriptFields != null) { out.writeBoolean(true); - out.writeList(scriptFields); + out.writeCollection(scriptFields); } else { out.writeBoolean(false); } out.writeOptionalVInt(scrollSize); out.writeOptionalWriteable(chunkingConfig); - out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(headers, StreamOutput::writeString); out.writeOptionalWriteable(delayedDataCheckConfig); out.writeOptionalVInt(maxEmptySearches); indicesOptions.writeIndicesOptions(out); @@ -792,7 +792,7 @@ public Builder(StreamInput in) throws IOException { this.queryDelay = in.readOptionalTimeValue(); this.frequency = in.readOptionalTimeValue(); if (in.readBoolean()) { - this.indices = in.readImmutableList(StreamInput::readString); + this.indices = in.readCollectionAsImmutableList(StreamInput::readString); } else { this.indices = null; } @@ -802,7 +802,7 @@ public Builder(StreamInput in) throws IOException { this.aggProvider = in.readOptionalWriteable(AggProvider::fromStream); if (in.readBoolean()) { - this.scriptFields = in.readImmutableList(SearchSourceBuilder.ScriptField::new); + this.scriptFields = in.readCollectionAsImmutableList(SearchSourceBuilder.ScriptField::new); } else { this.scriptFields = null; } @@ -837,13 +837,13 @@ public void writeTo(StreamOutput out) throws IOException { if (scriptFields != null) { out.writeBoolean(true); - out.writeList(scriptFields); + out.writeCollection(scriptFields); } else { out.writeBoolean(false); } out.writeOptionalVInt(scrollSize); out.writeOptionalWriteable(chunkingConfig); - out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(headers, StreamOutput::writeString); out.writeOptionalWriteable(delayedDataCheckConfig); out.writeOptionalVInt(maxEmptySearches); out.writeBoolean(indicesOptions != null); diff --git 
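[note] DatafeedConfig above keeps its presence-flag encoding for nullable lists (a leading boolean, then the list) while switching the non-null branch to the immutable-list reader. A minimal sketch; the holder class is hypothetical and the ScriptField reader mirrors the diff.

```java
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.search.builder.SearchSourceBuilder;

import java.io.IOException;
import java.util.List;

class FieldsHolder {
    private final List<SearchSourceBuilder.ScriptField> scriptFields;

    FieldsHolder(StreamInput in) throws IOException {
        if (in.readBoolean()) {
            // was: in.readImmutableList(SearchSourceBuilder.ScriptField::new)
            scriptFields = in.readCollectionAsImmutableList(SearchSourceBuilder.ScriptField::new);
        } else {
            scriptFields = null;
        }
    }

    void writeTo(StreamOutput out) throws IOException {
        if (scriptFields != null) {
            out.writeBoolean(true);
            // was: out.writeList(scriptFields)
            out.writeCollection(scriptFields);
        } else {
            out.writeBoolean(false);
        }
    }
}
```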
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java index 6e7575100e84e..ff3e765508daf 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/datafeed/DatafeedUpdate.java @@ -148,7 +148,7 @@ public DatafeedUpdate(StreamInput in) throws IOException { this.queryDelay = in.readOptionalTimeValue(); this.frequency = in.readOptionalTimeValue(); if (in.readBoolean()) { - this.indices = in.readStringList(); + this.indices = in.readStringCollectionAsList(); } else { this.indices = null; } @@ -157,7 +157,7 @@ public DatafeedUpdate(StreamInput in) throws IOException { this.aggProvider = in.readOptionalWriteable(AggProvider::fromStream); if (in.readBoolean()) { - this.scriptFields = in.readList(SearchSourceBuilder.ScriptField::new); + this.scriptFields = in.readCollectionAsList(SearchSourceBuilder.ScriptField::new); } else { this.scriptFields = null; } @@ -194,7 +194,7 @@ public void writeTo(StreamOutput out) throws IOException { if (scriptFields != null) { out.writeBoolean(true); - out.writeList(scriptFields); + out.writeCollection(scriptFields); } else { out.writeBoolean(false); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfig.java index 66f0b243cb1c9..bff15c016af0e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfig.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.ml.dataframe; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -191,7 +191,7 @@ public DataFrameAnalyticsConfig(StreamInput in) throws IOException { this.version = in.readBoolean() ? MlConfigVersion.readVersion(in) : null; this.allowLazyStart = in.readBoolean(); this.maxNumThreads = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { Map readMeta = in.readMap(); this.meta = readMeta == null ? 
null : Collections.unmodifiableMap(readMeta); } else { @@ -309,7 +309,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeNamedWriteable(analysis); out.writeOptionalWriteable(analyzedFields); out.writeOptionalWriteable(modelMemoryLimit); - out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(headers, StreamOutput::writeString); out.writeOptionalInstant(createTime); if (version != null) { out.writeBoolean(true); @@ -319,7 +319,7 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeBoolean(allowLazyStart); out.writeVInt(maxNumThreads); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeGenericMap(meta); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigUpdate.java index 7af2a4dc63158..330683981d1ca 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigUpdate.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.ml.dataframe; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -83,7 +83,7 @@ public DataFrameAnalyticsConfigUpdate(StreamInput in) throws IOException { this.modelMemoryLimit = in.readOptionalWriteable(ByteSizeValue::readFrom); this.allowLazyStart = in.readOptionalBoolean(); this.maxNumThreads = in.readOptionalVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { Map readMeta = in.readMap(); this.meta = readMeta == null ? 
null : Collections.unmodifiableMap(readMeta); } else { @@ -98,7 +98,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalWriteable(modelMemoryLimit); out.writeOptionalBoolean(allowLazyStart); out.writeOptionalVInt(maxNumThreads); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeGenericMap(meta); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Classification.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Classification.java index f45903987f475..485a00888029c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Classification.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Classification.java @@ -223,7 +223,7 @@ public Classification(StreamInput in) throws IOException { numTopClasses = in.readOptionalVInt(); trainingPercent = in.readDouble(); randomizeSeed = in.readOptionalLong(); - featureProcessors = Collections.unmodifiableList(in.readNamedWriteableList(PreProcessor.class)); + featureProcessors = Collections.unmodifiableList(in.readNamedWriteableCollectionAsList(PreProcessor.class)); earlyStoppingEnabled = in.readBoolean(); } @@ -278,7 +278,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalVInt(numTopClasses); out.writeDouble(trainingPercent); out.writeOptionalLong(randomizeSeed); - out.writeNamedWriteableList(featureProcessors); + out.writeNamedWriteableCollection(featureProcessors); out.writeBoolean(earlyStoppingEnabled); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java index ec20892427217..c42b44e84a6da 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/analyses/Regression.java @@ -183,7 +183,7 @@ public Regression(StreamInput in) throws IOException { randomizeSeed = in.readOptionalLong(); lossFunction = in.readEnum(LossFunction.class); lossFunctionParameter = in.readOptionalDouble(); - featureProcessors = Collections.unmodifiableList(in.readNamedWriteableList(PreProcessor.class)); + featureProcessors = Collections.unmodifiableList(in.readNamedWriteableCollectionAsList(PreProcessor.class)); earlyStoppingEnabled = in.readBoolean(); } @@ -238,7 +238,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalLong(randomizeSeed); out.writeEnum(lossFunction); out.writeOptionalDouble(lossFunctionParameter); - out.writeNamedWriteableList(featureProcessors); + out.writeNamedWriteableCollection(featureProcessors); out.writeBoolean(earlyStoppingEnabled); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/Accuracy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/Accuracy.java index 1c9e214308a32..346996a742cf1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/Accuracy.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/Accuracy.java @@ -239,7 +239,7 @@ public Result(List classes, 
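[note] DataFrameAnalyticsConfig and DataFrameAnalyticsConfigUpdate above share the same optional meta-map handling: readMap() may return null, a non-null result is wrapped unmodifiably, and writeGenericMap tolerates the null on the way out, all behind the V_8_8_0 gate. A condensed sketch with a hypothetical holder.

```java
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.Collections;
import java.util.Map;

class MetaHolder {
    private final Map<String, Object> meta;

    MetaHolder(StreamInput in) throws IOException {
        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
            Map<String, Object> readMeta = in.readMap();
            meta = readMeta == null ? null : Collections.unmodifiableMap(readMeta);
        } else {
            meta = null;  // field is absent on older wires
        }
    }

    void writeTo(StreamOutput out) throws IOException {
        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
            out.writeGenericMap(meta);  // handles null without a presence flag
        }
    }
}
```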
double overallAccuracy) { } public Result(StreamInput in) throws IOException { - this.classes = in.readImmutableList(PerClassSingleValue::new); + this.classes = in.readCollectionAsImmutableList(PerClassSingleValue::new); this.overallAccuracy = in.readDouble(); } @@ -263,7 +263,7 @@ public double getOverallAccuracy() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(classes); + out.writeCollection(classes); out.writeDouble(overallAccuracy); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/Classification.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/Classification.java index 7934c97b7f113..373402be8419d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/Classification.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/Classification.java @@ -110,7 +110,7 @@ public Classification(StreamInput in) throws IOException { in.readOptionalString(), true ); - this.metrics = in.readNamedWriteableList(EvaluationMetric.class); + this.metrics = in.readNamedWriteableCollectionAsList(EvaluationMetric.class); } @Override @@ -140,7 +140,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(fields.getTopClassesField()); out.writeOptionalString(fields.getPredictedClassField()); out.writeOptionalString(fields.getPredictedProbabilityField()); - out.writeNamedWriteableList(metrics); + out.writeNamedWriteableCollection(metrics); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/MulticlassConfusionMatrix.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/MulticlassConfusionMatrix.java index ab9f54c7348e2..5279f026722af 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/MulticlassConfusionMatrix.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/MulticlassConfusionMatrix.java @@ -286,7 +286,7 @@ public Result(List actualClasses, long otherActualClassCount) { } public Result(StreamInput in) throws IOException { - this.actualClasses = in.readImmutableList(ActualClass::new); + this.actualClasses = in.readCollectionAsImmutableList(ActualClass::new); this.otherActualClassCount = in.readVLong(); } @@ -310,7 +310,7 @@ public long getOtherActualClassCount() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(actualClasses); + out.writeCollection(actualClasses); out.writeVLong(otherActualClassCount); } @@ -382,7 +382,7 @@ public ActualClass( public ActualClass(StreamInput in) throws IOException { this.actualClass = in.readString(); this.actualClassDocCount = in.readVLong(); - this.predictedClasses = in.readImmutableList(PredictedClass::new); + this.predictedClasses = in.readCollectionAsImmutableList(PredictedClass::new); this.otherPredictedClassDocCount = in.readVLong(); } @@ -406,7 +406,7 @@ public long getOtherPredictedClassDocCount() { public void writeTo(StreamOutput out) throws IOException { out.writeString(actualClass); out.writeVLong(actualClassDocCount); - out.writeList(predictedClasses); + out.writeCollection(predictedClasses); out.writeVLong(otherPredictedClassDocCount); } diff --git 
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/Precision.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/Precision.java
index 0d873289e2fb9..5b9cffd48f284 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/Precision.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/Precision.java
@@ -232,7 +232,7 @@ public Result(List<PerClassSingleValue> classes, double avgPrecision) {
     }

     public Result(StreamInput in) throws IOException {
-        this.classes = in.readImmutableList(PerClassSingleValue::new);
+        this.classes = in.readCollectionAsImmutableList(PerClassSingleValue::new);
         this.avgPrecision = in.readDouble();
     }
@@ -256,7 +256,7 @@ public double getAvgPrecision() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeList(classes);
+        out.writeCollection(classes);
         out.writeDouble(avgPrecision);
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/Recall.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/Recall.java
index e2b3d35c73b90..646af7848cf23 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/Recall.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/classification/Recall.java
@@ -202,7 +202,7 @@ public Result(List<PerClassSingleValue> classes, double avgRecall) {
     }

     public Result(StreamInput in) throws IOException {
-        this.classes = in.readImmutableList(PerClassSingleValue::new);
+        this.classes = in.readCollectionAsImmutableList(PerClassSingleValue::new);
         this.avgRecall = in.readDouble();
     }
@@ -226,7 +226,7 @@ public double getAvgRecall() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeList(classes);
+        out.writeCollection(classes);
         out.writeDouble(avgRecall);
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/common/AbstractAucRoc.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/common/AbstractAucRoc.java
index cd74f4e86eb8b..c45b10e9decf6 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/common/AbstractAucRoc.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/common/AbstractAucRoc.java
@@ -298,7 +298,7 @@ public Result(double value, List<AucRocPoint> curve) {
     public Result(StreamInput in) throws IOException {
         this.value = in.readDouble();
-        this.curve = in.readList(AucRocPoint::new);
+        this.curve = in.readCollectionAsList(AucRocPoint::new);
     }

     public double getValue() {
@@ -322,7 +322,7 @@ public String getMetricName() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeDouble(value);
-        out.writeList(curve);
+        out.writeCollection(curve);
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/outlierdetection/OutlierDetection.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/outlierdetection/OutlierDetection.java
index 7de3308670cad..5e924558d5d77 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/outlierdetection/OutlierDetection.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/outlierdetection/OutlierDetection.java
@@ -98,7 +98,7 @@ private static List<EvaluationMetric> defaultMetrics() {
     public OutlierDetection(StreamInput in) throws IOException {
         this.fields = new EvaluationFields(in.readString(), null, null, null, in.readString(), false);
-        this.metrics = in.readNamedWriteableList(EvaluationMetric.class);
+        this.metrics = in.readNamedWriteableCollectionAsList(EvaluationMetric.class);
     }

     @Override
@@ -125,7 +125,7 @@ public String getWriteableName() {
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(fields.getActualField());
         out.writeString(fields.getPredictedProbabilityField());
-        out.writeNamedWriteableList(metrics);
+        out.writeNamedWriteableCollection(metrics);
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/Regression.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/Regression.java
index 0989c9ccd0f8c..f4b8866cae9bd 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/Regression.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/evaluation/regression/Regression.java
@@ -87,7 +87,7 @@ private static List<EvaluationMetric> defaultMetrics() {
     public Regression(StreamInput in) throws IOException {
         this.fields = new EvaluationFields(in.readString(), in.readString(), null, null, null, false);
-        this.metrics = in.readNamedWriteableList(EvaluationMetric.class);
+        this.metrics = in.readNamedWriteableCollectionAsList(EvaluationMetric.class);
     }

     @Override
@@ -114,7 +114,7 @@ public String getWriteableName() {
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(fields.getActualField());
         out.writeString(fields.getPredictedField());
-        out.writeNamedWriteableList(metrics);
+        out.writeNamedWriteableCollection(metrics);
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/explain/FieldSelection.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/explain/FieldSelection.java
index e7329c706bb38..80996ae4a9781 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/explain/FieldSelection.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/explain/FieldSelection.java
@@ -101,7 +101,7 @@ public static FieldSelection excluded(String name, Set<String> mappingTypes, Str
     public FieldSelection(StreamInput in) throws IOException {
         this.name = in.readString();
-        this.mappingTypes = in.readImmutableSet(StreamInput::readString);
+        this.mappingTypes = in.readCollectionAsImmutableSet(StreamInput::readString);
         this.isIncluded = in.readBoolean();
         this.isRequired = in.readBoolean();
         boolean hasFeatureType = in.readBoolean();
@@ -118,7 +118,7 @@ public FieldSelection(StreamInput in) throws IOException {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(name);
-        out.writeCollection(mappingTypes, StreamOutput::writeString);
+        out.writeStringCollection(mappingTypes);
         out.writeBoolean(isIncluded);
         out.writeBoolean(isRequired);
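FieldSelection above shows the companion change for plain string collections: the generic writeCollection(values, StreamOutput::writeString) call collapses into the dedicated writeStringCollection, and the set is read back with readCollectionAsImmutableSet. A minimal sketch under those assumptions; the ExampleFieldInfo holder is hypothetical.

import java.io.IOException;
import java.util.Set;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

// Hypothetical holder for a set of mapping type names, mirroring FieldSelection.
public class ExampleFieldInfo implements Writeable {

    private final Set<String> mappingTypes;

    public ExampleFieldInfo(StreamInput in) throws IOException {
        // Immutable-set counterpart of readCollectionAsImmutableList.
        this.mappingTypes = in.readCollectionAsImmutableSet(StreamInput::readString);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // Replaces writeCollection(mappingTypes, StreamOutput::writeString);
        // the string element writer is now implied by the method name.
        out.writeStringCollection(mappingTypes);
    }
}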
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/stats/classification/ValidationLoss.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/stats/classification/ValidationLoss.java
index d4a105a9ca605..57562637f6c4f 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/stats/classification/ValidationLoss.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/stats/classification/ValidationLoss.java
@@ -57,13 +57,13 @@ public ValidationLoss(String lossType, List<FoldValues> values) {
     public ValidationLoss(StreamInput in) throws IOException {
         lossType = in.readString();
-        foldValues = in.readList(FoldValues::new);
+        foldValues = in.readCollectionAsList(FoldValues::new);
     }

     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(lossType);
-        out.writeList(foldValues);
+        out.writeCollection(foldValues);
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/stats/regression/ValidationLoss.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/stats/regression/ValidationLoss.java
index c647c784948fa..227b0bc5c3622 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/stats/regression/ValidationLoss.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/stats/regression/ValidationLoss.java
@@ -57,13 +57,13 @@ public ValidationLoss(String lossType, List<FoldValues> values) {
     public ValidationLoss(StreamInput in) throws IOException {
         lossType = in.readString();
-        foldValues = in.readList(FoldValues::new);
+        foldValues = in.readCollectionAsList(FoldValues::new);
     }

     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(lossType);
-        out.writeList(foldValues);
+        out.writeCollection(foldValues);
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java
index 77f3959b4a758..9d4442f877b85 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelConfig.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.core.ml.inference;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesArray;
@@ -103,8 +104,8 @@ public class TrainedModelConfig implements ToXContentObject, Writeable {
     public static final ParseField PER_DEPLOYMENT_MEMORY_BYTES = new ParseField("per_deployment_memory_bytes");
     public static final ParseField PER_ALLOCATION_MEMORY_BYTES = new ParseField("per_allocation_memory_bytes");

-    public static final TransportVersion VERSION_3RD_PARTY_CONFIG_ADDED = TransportVersion.V_8_0_0;
-    public static final TransportVersion VERSION_ALLOCATION_MEMORY_ADDED = TransportVersion.V_8_500_064;
+    public static final TransportVersion VERSION_3RD_PARTY_CONFIG_ADDED = TransportVersions.V_8_0_0;
+    public static final TransportVersion VERSION_ALLOCATION_MEMORY_ADDED = TransportVersions.V_8_500_064;

     // These parsers follow the pattern that metadata is parsed leniently (to allow for enhancements), whilst config is parsed strictly
     public static final ObjectParser LENIENT_PARSER = createParser(true);
@@ -255,7 +256,7 @@ public TrainedModelConfig(StreamInput in) throws IOException {
         description = in.readOptionalString();
         createTime = in.readInstant();
         definition = in.readOptionalWriteable(LazyModelDefinition::fromStreamInput);
-        tags = in.readImmutableList(StreamInput::readString);
+        tags = in.readCollectionAsImmutableList(StreamInput::readString);
         metadata = in.readMap();
         input = new TrainedModelInput(in);
         modelSize = in.readVLong();
@@ -271,7 +272,7 @@ public TrainedModelConfig(StreamInput in) throws IOException {
             this.modelType = null;
             this.location = null;
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
             modelPackageConfig = in.readOptionalWriteable(ModelPackageConfig::new);
             fullDefinition = in.readOptionalBoolean();
         } else {
@@ -428,7 +429,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalString(description);
         out.writeInstant(createTime);
         out.writeOptionalWriteable(definition);
-        out.writeCollection(tags, StreamOutput::writeString);
+        out.writeStringCollection(tags);
         out.writeGenericMap(metadata);
         input.writeTo(out);
         out.writeVLong(modelSize);
@@ -436,7 +437,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeString(licenseLevel.description());
         if (defaultFieldMap != null) {
             out.writeBoolean(true);
-            out.writeMap(defaultFieldMap, StreamOutput::writeString, StreamOutput::writeString);
+            out.writeMap(defaultFieldMap, StreamOutput::writeString);
         } else {
             out.writeBoolean(false);
         }
@@ -446,7 +447,7 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeOptionalNamedWriteable(location);
         }

-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
            out.writeOptionalWriteable(modelPackageConfig);
            out.writeOptionalBoolean(fullDefinition);
         }
@@ -1045,7 +1046,7 @@ public static LazyModelDefinition fromBase64String(String base64String) {
         }

         public static LazyModelDefinition fromStreamInput(StreamInput input) throws IOException {
-            if (input.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) {
+            if (input.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) {
                 return new LazyModelDefinition(input.readBytesReference(), null);
             } else {
                 return fromBase64String(input.readString());
@@ -1109,7 +1110,7 @@ private void ensureParsedDefinitionUnsafe(NamedXContentRegistry xContentRegistry
         @Override
         public void writeTo(StreamOutput out) throws IOException {
-            if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) {
+            if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) {
                 out.writeBytesReference(getCompressedDefinition());
             } else {
                 out.writeString(getBase64CompressedDefinition());
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelDefinition.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelDefinition.java
index 3bab816b4c5be..f1540e25b4d88 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelDefinition.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelDefinition.java
@@ -84,13 +84,13 @@ private TrainedModelDefinition(TrainedModel trainedModel, List<PreProcessor> pre
     public TrainedModelDefinition(StreamInput in) throws IOException {
         this.trainedModel = in.readNamedWriteable(TrainedModel.class);
-        this.preProcessors = in.readNamedWriteableList(PreProcessor.class);
+        this.preProcessors = in.readNamedWriteableCollectionAsList(PreProcessor.class);
     }

     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeNamedWriteable(trainedModel);
-        out.writeNamedWriteableList(preProcessors);
+        out.writeNamedWriteableCollection(preProcessors);
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelInput.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelInput.java
index 89a25a1cc62f8..a955cdeb88800 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelInput.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelInput.java
@@ -35,7 +35,7 @@ public TrainedModelInput(List<String> fieldNames) {
     }

     public TrainedModelInput(StreamInput in) throws IOException {
-        this.fieldNames = in.readImmutableList(StreamInput::readString);
+        this.fieldNames = in.readCollectionAsImmutableList(StreamInput::readString);
     }

     @SuppressWarnings("unchecked")
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStats.java
index 301e895c57f37..d8e5d7a6d9603 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStats.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/AssignmentStats.java
@@ -7,7 +7,7 @@
 package org.elasticsearch.xpack.core.ml.inference.assignment;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -163,7 +163,7 @@ public NodeStats(StreamInput in) throws IOException {
         this.pendingCount = in.readOptionalVInt();
         this.routingState = in.readOptionalWriteable(RoutingStateAndReason::new);
         this.startTime = in.readOptionalInstant();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) {
             this.threadsPerAllocation = in.readOptionalVInt();
             this.numberOfAllocations = in.readOptionalVInt();
             this.errorCount = in.readVInt();
@@ -176,7 +176,7 @@ public NodeStats(StreamInput in) throws IOException {
             this.rejectedExecutionCount = 0;
             this.timeoutCount = 0;
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) {
             this.peakThroughput = in.readVLong();
             this.throughputLastPeriod = in.readVLong();
             this.avgInferenceTimeLastPeriod = in.readOptionalDouble();
@@ -185,14 +185,14 @@ public NodeStats(StreamInput in) throws IOException {
             this.throughputLastPeriod = 0;
             this.avgInferenceTimeLastPeriod = null;
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
             this.cacheHitCount = in.readOptionalVLong();
             this.cacheHitCountLastPeriod = in.readOptionalVLong();
         } else {
             this.cacheHitCount = null;
             this.cacheHitCountLastPeriod = null;
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_5_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) {
             this.avgInferenceTimeExcludingCacheHit = in.readOptionalDouble();
         } else {
             this.avgInferenceTimeExcludingCacheHit = null;
@@ -342,23 +342,23 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalVInt(pendingCount);
         out.writeOptionalWriteable(routingState);
         out.writeOptionalInstant(startTime);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) {
             out.writeOptionalVInt(threadsPerAllocation);
             out.writeOptionalVInt(numberOfAllocations);
             out.writeVInt(errorCount);
             out.writeVInt(rejectedExecutionCount);
             out.writeVInt(timeoutCount);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) {
             out.writeVLong(peakThroughput);
             out.writeVLong(throughputLastPeriod);
             out.writeOptionalDouble(avgInferenceTimeLastPeriod);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
             out.writeOptionalVLong(cacheHitCount);
             out.writeOptionalVLong(cacheHitCountLastPeriod);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_5_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) {
             out.writeOptionalDouble(avgInferenceTimeExcludingCacheHit);
         }
     }
@@ -460,21 +460,21 @@ public AssignmentStats(StreamInput in) throws IOException {
         numberOfAllocations = in.readOptionalVInt();
         queueCapacity = in.readOptionalVInt();
         startTime = in.readInstant();
-        nodeStats = in.readList(AssignmentStats.NodeStats::new);
+        nodeStats = in.readCollectionAsList(AssignmentStats.NodeStats::new);
         state = in.readOptionalEnum(AssignmentState.class);
         reason = in.readOptionalString();
        allocationStatus = in.readOptionalWriteable(AllocationStatus::new);
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
             cacheSize = in.readOptionalWriteable(ByteSizeValue::readFrom);
         } else {
             cacheSize = null;
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) {
             priority = in.readEnum(Priority.class);
         } else {
             priority = Priority.NORMAL;
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
             deploymentId = in.readString();
         } else {
             deploymentId = modelId;
@@ -632,21 +632,21 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalVInt(numberOfAllocations);
         out.writeOptionalVInt(queueCapacity);
         out.writeInstant(startTime);
-        out.writeList(nodeStats);
-        if (AssignmentState.FAILED.equals(state) && out.getTransportVersion().before(TransportVersion.V_8_4_0)) {
+        out.writeCollection(nodeStats);
+        if (AssignmentState.FAILED.equals(state) && out.getTransportVersion().before(TransportVersions.V_8_4_0)) {
             out.writeOptionalEnum(AssignmentState.STARTING);
         } else {
             out.writeOptionalEnum(state);
         }
         out.writeOptionalString(reason);
         out.writeOptionalWriteable(allocationStatus);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
             out.writeOptionalWriteable(cacheSize);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) {
             out.writeEnum(priority);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
             out.writeString(deploymentId);
         }
     }
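AssignmentStats above is the densest example of the other half of this change: the wire-version constants move from TransportVersion to the new TransportVersions holder class, while the gating idiom itself stays the same. A minimal sketch of that idiom, with the field defaulted when reading from a node that predates it; the ExampleStats class is hypothetical, the API calls are the ones used above.

import java.io.IOException;

import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

// Hypothetical stats object with one field added in 8.2.
public class ExampleStats implements Writeable {

    private final long peakThroughput;

    public ExampleStats(StreamInput in) throws IOException {
        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) {
            this.peakThroughput = in.readVLong();
        } else {
            // Older senders never wrote the field, so fall back to a default.
            this.peakThroughput = 0;
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // Only emit the field when the receiving node knows how to read it.
        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) {
            out.writeVLong(peakThroughput);
        }
    }
}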
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfo.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfo.java
index 14886f42be877..fd2f3627e3fb1 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfo.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfo.java
@@ -7,7 +7,7 @@
 package org.elasticsearch.xpack.core.ml.inference.assignment;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -67,7 +67,7 @@ public RoutingInfo(int currentAllocations, int targetAllocations, RoutingState s
     }

     public RoutingInfo(StreamInput in) throws IOException {
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
             this.currentAllocations = in.readVInt();
             this.targetAllocations = in.readVInt();
         } else {
@@ -101,7 +101,7 @@ public boolean isOutdated() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
             out.writeVInt(currentAllocations);
             out.writeVInt(targetAllocations);
         }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfoUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfoUpdate.java
index 17195c2203da8..8458db7a3d641 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfoUpdate.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/RoutingInfoUpdate.java
@@ -7,7 +7,7 @@
 package org.elasticsearch.xpack.core.ml.inference.assignment;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -35,7 +35,7 @@ private RoutingInfoUpdate(Optional numberOfAllocations, Optional
-        out.writeMap(nodeRoutingTable, (o, w) -> w.writeTo(o));
+        out.writeMap(nodeRoutingTable, StreamOutput::writeWriteable);
         out.writeEnum(assignmentState);
         out.writeOptionalString(reason);
         out.writeInstant(startTime);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
             out.writeVInt(maxAssignedAllocations);
         }
     }
@@ -410,6 +410,15 @@ public Builder updateExistingRoutingEntry(String nodeId, RoutingInfo routingInfo
         return this;
     }

+    /**
+     * Adds the {@link RoutingInfo} regardless of whether it already exists.
+     */
+    public Builder addOrOverwriteRoutingEntry(String nodeId, RoutingInfo routingInfo) {
+        nodeRoutingTable.put(nodeId, routingInfo);
+
+        return this;
+    }
+
     public Builder removeRoutingEntry(String nodeId) {
         nodeRoutingTable.remove(nodeId);
         return this;
@@ -465,6 +474,12 @@ public Builder clearReason() {
         return this;
     }

+    public Builder clearNodeRoutingTable() {
+        nodeRoutingTable.clear();
+
+        return this;
+    }
+
     public Builder setNumberOfAllocations(int numberOfAllocations) {
         this.taskParams = new StartTrainedModelDeploymentAction.TaskParams(
             taskParams.getModelId(),
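The two hunks above add genuinely new builder methods rather than renaming existing ones: addOrOverwriteRoutingEntry inserts unconditionally (updateExistingRoutingEntry only touches entries that are already present), and clearNodeRoutingTable wipes the table. A hypothetical usage sketch; the resetRouting helper is invented, and the RoutingInfo constructor arguments (current allocations, target allocations, state, reason) are assumed from the RoutingInfo snippet above.

import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo;
import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState;
import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment;

class RoutingTableExample {
    // Drops every existing routing entry and installs a single fresh one for nodeId.
    static TrainedModelAssignment.Builder resetRouting(TrainedModelAssignment.Builder builder, String nodeId) {
        return builder.clearNodeRoutingTable()
            // Works whether or not the node already had an entry, unlike
            // updateExistingRoutingEntry.
            .addOrOverwriteRoutingEntry(nodeId, new RoutingInfo(0, 1, RoutingState.STARTING, ""));
    }
}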
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/FrequencyEncoding.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/FrequencyEncoding.java
index 034b6721ec1ec..639890ddfda19 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/FrequencyEncoding.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/FrequencyEncoding.java
@@ -159,7 +159,7 @@ public String getWriteableName() {
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(field);
         out.writeString(featureName);
-        out.writeMap(frequencyMap, StreamOutput::writeString, StreamOutput::writeDouble);
+        out.writeMap(frequencyMap, StreamOutput::writeDouble);
         out.writeBoolean(custom);
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/Multi.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/Multi.java
index bd5b6d980d45b..4faf861b9d90f 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/Multi.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/Multi.java
@@ -122,7 +122,7 @@ public Multi(PreProcessor[] processors, Boolean custom) {
     }

     public Multi(StreamInput in) throws IOException {
-        this.processors = in.readNamedWriteableList(PreProcessor.class).toArray(PreProcessor[]::new);
+        this.processors = in.readNamedWriteableCollectionAsList(PreProcessor.class).toArray(PreProcessor[]::new);
         this.custom = in.readBoolean();
         this.outputFields = in.readOrderedMap(StreamInput::readString, StreamInput::readString);
         this.inputFields = in.readStringArray();
@@ -130,9 +130,9 @@ public Multi(StreamInput in) throws IOException {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeNamedWriteableList(Arrays.asList(processors));
+        out.writeNamedWriteableCollection(Arrays.asList(processors));
         out.writeBoolean(custom);
-        out.writeMap(outputFields, StreamOutput::writeString, StreamOutput::writeString);
+        out.writeMap(outputFields, StreamOutput::writeString);
         out.writeStringArray(inputFields);
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/OneHotEncoding.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/OneHotEncoding.java
index cd0a91466dced..cb90b3e171e85 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/OneHotEncoding.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/OneHotEncoding.java
@@ -143,7 +143,7 @@ public String getWriteableName() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(field);
-        out.writeMap(hotMap, StreamOutput::writeString, StreamOutput::writeString);
+        out.writeMap(hotMap, StreamOutput::writeString);
         out.writeBoolean(custom);
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/TargetMeanEncoding.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/TargetMeanEncoding.java
index ac69bec04c619..60bbe6a760a97 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/TargetMeanEncoding.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/preprocessing/TargetMeanEncoding.java
@@ -171,7 +171,7 @@ public String getWriteableName() {
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(field);
         out.writeString(featureName);
-        out.writeMap(meanMap, StreamOutput::writeString, StreamOutput::writeDouble);
+        out.writeMap(meanMap, StreamOutput::writeDouble);
         out.writeDouble(defaultValue);
         out.writeBoolean(custom);
     }
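The preprocessing classes above all switch to a two-argument writeMap overload: when the keys are strings, the key writer is implied and only the value writer is passed. A minimal sketch of a String-keyed map round-trip under that assumption; the ExampleEncoding class is hypothetical and the read side uses the long-standing two-reader readMap, since the diff only shows the write side.

import java.io.IOException;
import java.util.Map;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

// Hypothetical encoding with a String-keyed frequency map, mirroring FrequencyEncoding.
public class ExampleEncoding implements Writeable {

    private final Map<String, Double> frequencyMap;

    public ExampleEncoding(StreamInput in) throws IOException {
        this.frequencyMap = in.readMap(StreamInput::readString, StreamInput::readDouble);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // String keys are implied by the overload; only the value writer is passed.
        // The old spelling was writeMap(frequencyMap, StreamOutput::writeString, StreamOutput::writeDouble).
        out.writeMap(frequencyMap, StreamOutput::writeDouble);
    }
}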
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/ClassificationFeatureImportance.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/ClassificationFeatureImportance.java
index ef918209bf19e..f59a8c996810c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/ClassificationFeatureImportance.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/ClassificationFeatureImportance.java
@@ -62,7 +62,7 @@ public ClassificationFeatureImportance(String featureName, List
     public ClassificationFeatureImportance(StreamInput in) throws IOException {
         this.featureName = in.readString();
-        this.classImportance = in.readList(ClassImportance::new);
+        this.classImportance = in.readCollectionAsList(ClassImportance::new);
     }

     public List getClassImportance() {
@@ -85,7 +85,7 @@ public double getTotalImportance() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(featureName);
-        out.writeList(classImportance);
+        out.writeCollection(classImportance);
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/ClassificationInferenceResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/ClassificationInferenceResults.java
index cb008062cc0b2..475e39c8eb3d2 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/ClassificationInferenceResults.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/ClassificationInferenceResults.java
@@ -118,9 +118,9 @@ static List takeTopFeatureImportances(
     public ClassificationInferenceResults(StreamInput in) throws IOException {
         super(in);
-        this.featureImportance = in.readList(ClassificationFeatureImportance::new);
+        this.featureImportance = in.readCollectionAsList(ClassificationFeatureImportance::new);
         this.classificationLabel = in.readOptionalString();
-        this.topClasses = in.readImmutableList(TopClassEntry::new);
+        this.topClasses = in.readCollectionAsImmutableList(TopClassEntry::new);
         this.topNumClassesField = in.readString();
         this.resultsField = in.readString();
         this.predictionFieldType = in.readEnum(PredictionFieldType.class);
@@ -147,7 +147,7 @@ public List getFeatureImportance() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        out.writeList(featureImportance);
+        out.writeCollection(featureImportance);
         out.writeOptionalString(classificationLabel);
         out.writeCollection(topClasses);
         out.writeString(topNumClassesField);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/NerResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/NerResults.java
index fd1751387942c..b077c93c141a5 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/NerResults.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/NerResults.java
@@ -40,7 +40,7 @@ public NerResults(String resultsField, String annotatedResult, List
     public NerResults(StreamInput in) throws IOException {
         super(in);
-        entityGroups = in.readList(EntityGroup::new);
+        entityGroups = in.readCollectionAsList(EntityGroup::new);
         resultsField = in.readString();
         annotatedResult = in.readString();
     }
@@ -62,7 +62,7 @@ public String getWriteableName() {
     @Override
     void doWriteTo(StreamOutput out) throws IOException {
-        out.writeList(entityGroups);
+        out.writeCollection(entityGroups);
         out.writeString(resultsField);
         out.writeString(annotatedResult);
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/NlpClassificationInferenceResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/NlpClassificationInferenceResults.java
index 7f18a63535203..7556b223dd317 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/NlpClassificationInferenceResults.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/NlpClassificationInferenceResults.java
@@ -45,7 +45,7 @@ public NlpClassificationInferenceResults(
     public NlpClassificationInferenceResults(StreamInput in) throws IOException {
         super(in);
         this.classificationLabel = in.readString();
-        this.topClasses = in.readImmutableList(TopClassEntry::new);
+        this.topClasses = in.readCollectionAsImmutableList(TopClassEntry::new);
         this.resultsField = in.readString();
         this.predictionProbability = in.readOptionalDouble();
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/QuestionAnsweringInferenceResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/QuestionAnsweringInferenceResults.java
index 85f326f244bde..0c60b05320cec 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/QuestionAnsweringInferenceResults.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/QuestionAnsweringInferenceResults.java
@@ -62,7 +62,7 @@ public QuestionAnsweringInferenceResults(StreamInput in) throws IOException {
         this.answer = in.readString();
         this.startOffset = in.readVInt();
         this.endOffset = in.readVInt();
-        this.topClasses = in.readImmutableList(TopAnswerEntry::fromStream);
+        this.topClasses = in.readCollectionAsImmutableList(TopAnswerEntry::fromStream);
         this.resultsField = in.readString();
         this.score = in.readDouble();
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/RegressionInferenceResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/RegressionInferenceResults.java
index dcab4d9508c80..e0af785f827e3 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/RegressionInferenceResults.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/RegressionInferenceResults.java
@@ -74,14 +74,14 @@ static List takeTopFeatureImportances(
     public RegressionInferenceResults(StreamInput in) throws IOException {
         super(in);
-        this.featureImportance = in.readList(RegressionFeatureImportance::new);
+        this.featureImportance = in.readCollectionAsList(RegressionFeatureImportance::new);
         this.resultsField = in.readString();
     }

     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        out.writeList(featureImportance);
+        out.writeCollection(featureImportance);
         out.writeString(resultsField);
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/TextExpansionResults.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/TextExpansionResults.java
index 93f6780f05eec..0dfc0dccbb8f6 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/TextExpansionResults.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/results/TextExpansionResults.java
@@ -64,7 +64,7 @@ public TextExpansionResults(String resultField, List weightedToke
     public TextExpansionResults(StreamInput in) throws IOException {
         super(in);
         this.resultsField = in.readString();
-        this.weightedTokens = in.readList(WeightedToken::new);
+        this.weightedTokens = in.readCollectionAsList(WeightedToken::new);
     }

     public List getWeightedTokens() {
@@ -112,7 +112,7 @@ public int hashCode() {
     @Override
     void doWriteTo(StreamOutput out) throws IOException {
         out.writeString(resultsField);
-        out.writeList(weightedTokens);
+        out.writeCollection(weightedTokens);
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/AbstractTokenizationUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/AbstractTokenizationUpdate.java
index 263083fa26be3..0431472302414 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/AbstractTokenizationUpdate.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/AbstractTokenizationUpdate.java
@@ -7,7 +7,7 @@
 package org.elasticsearch.xpack.core.ml.inference.trainedmodel;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.core.Nullable;
@@ -34,7 +34,7 @@ public AbstractTokenizationUpdate(@Nullable Tokenization.Truncate truncate, @Nul
     public AbstractTokenizationUpdate(StreamInput in) throws IOException {
         this.truncate = in.readOptionalEnum(Tokenization.Truncate.class);
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) {
             this.span = in.readOptionalInt();
         } else {
             this.span = null;
@@ -62,7 +62,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalEnum(truncate);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) {
             out.writeOptionalInt(span);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ClassificationConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ClassificationConfig.java
index 5722011fcb4f3..5f5b42605ed7e 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ClassificationConfig.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ClassificationConfig.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.core.ml.inference.trainedmodel;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xcontent.ObjectParser;
@@ -28,7 +29,7 @@ public class ClassificationConfig implements LenientlyParsedInferenceConfig, Str
     public static final ParseField NUM_TOP_FEATURE_IMPORTANCE_VALUES = new ParseField("num_top_feature_importance_values");
     public static final ParseField PREDICTION_FIELD_TYPE = new ParseField("prediction_field_type");
     private static final MlConfigVersion MIN_SUPPORTED_VERSION = MlConfigVersion.V_7_6_0;
-    private static final TransportVersion MIN_SUPPORTED_TRANSPORT_VERSION = TransportVersion.V_7_6_0;
+    private static final TransportVersion MIN_SUPPORTED_TRANSPORT_VERSION = TransportVersions.V_7_6_0;

     public static ClassificationConfig EMPTY_PARAMS = new ClassificationConfig(
         0,
@@ -200,7 +201,7 @@ public MlConfigVersion getMinimalSupportedMlConfigVersion() {
     @Override
     public TransportVersion getMinimalSupportedTransportVersion() {
-        return requestingImportance() ? TransportVersion.V_7_7_0 : MIN_SUPPORTED_TRANSPORT_VERSION;
+        return requestingImportance() ? TransportVersions.V_7_7_0 : MIN_SUPPORTED_TRANSPORT_VERSION;
     }

     public static Builder builder() {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ClassificationConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ClassificationConfigUpdate.java
index 07877776fd69b..a036427abbe48 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ClassificationConfigUpdate.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ClassificationConfigUpdate.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.core.ml.inference.trainedmodel;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xcontent.ObjectParser;
@@ -250,7 +251,7 @@ boolean isNoop(ClassificationConfig originalConfig) {
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_7_8_0;
+        return TransportVersions.V_7_8_0;
     }

     public static class Builder implements InferenceConfigUpdate.Builder {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/EmptyConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/EmptyConfigUpdate.java
index f5287769e64a6..0ba74df1f8d54 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/EmptyConfigUpdate.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/EmptyConfigUpdate.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.ml.inference.trainedmodel;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xpack.core.ml.MlConfigVersion;
@@ -68,7 +69,7 @@ public int hashCode() {
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_7_9_0;
+        return TransportVersions.V_7_9_0;
     }

     public static class Builder implements InferenceConfigUpdate.Builder {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/FillMaskConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/FillMaskConfig.java
index 01035cdf8d2c9..24b7a95c9ccac 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/FillMaskConfig.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/FillMaskConfig.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.ml.inference.trainedmodel;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -139,7 +140,7 @@ public MlConfigVersion getMinimalSupportedMlConfigVersion() {
     @Override
     public TransportVersion getMinimalSupportedTransportVersion() {
-        return TransportVersion.V_8_0_0;
+        return TransportVersions.V_8_0_0;
     }

     @Override
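The config and config-update classes in this stretch all declare a minimal supported wire version; the migration leaves the declared bound untouched and only changes where the constant lives (the TransportVersion type stays, the V_* constants move to TransportVersions). A minimal sketch of the pattern; the ExampleConfigUpdate class is hypothetical.

import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions;

// Hypothetical update class showing the relocated version constants.
class ExampleConfigUpdate {
    // Nodes on an older transport version cannot deserialize this update,
    // so callers check this bound before sending it to them.
    public TransportVersion getMinimalSupportedVersion() {
        return TransportVersions.V_8_0_0;
    }
}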
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/FillMaskConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/FillMaskConfigUpdate.java
index 1518db7df713c..cb081aa48d0a2 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/FillMaskConfigUpdate.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/FillMaskConfigUpdate.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.ml.inference.trainedmodel;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xcontent.ObjectParser;
@@ -104,7 +105,7 @@ public String getName() {
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_8_0_0;
+        return TransportVersions.V_8_0_0;
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfig.java
index da4d72c656ffd..25a2055e00f68 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfig.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfig.java
@@ -80,7 +80,7 @@ public LearnToRankConfig(Integer numTopFeatureImportanceValues, List getFeatureExtractorBuilders() {
@@ -120,7 +120,7 @@ public String getWriteableName() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        out.writeNamedWriteableList(featureExtractorBuilders);
+        out.writeNamedWriteableCollection(featureExtractorBuilders);
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdate.java
index 88de41fbcd815..b4241f1704520 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdate.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/LearnToRankConfigUpdate.java
@@ -92,7 +92,7 @@ public LearnToRankConfigUpdate(
     public LearnToRankConfigUpdate(StreamInput in) throws IOException {
         this.numTopFeatureImportanceValues = in.readOptionalVInt();
-        this.featureExtractorBuilderList = in.readNamedWriteableList(LearnToRankFeatureExtractorBuilder.class);
+        this.featureExtractorBuilderList = in.readNamedWriteableCollectionAsList(LearnToRankFeatureExtractorBuilder.class);
     }

     public Integer getNumTopFeatureImportanceValues() {
@@ -117,7 +117,7 @@ public String getWriteableName() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalVInt(numTopFeatureImportanceValues);
-        out.writeNamedWriteableList(featureExtractorBuilderList);
+        out.writeNamedWriteableCollection(featureExtractorBuilderList);
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ModelPackageConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ModelPackageConfig.java
index 0fdfa6976af8a..5014170c810e1 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ModelPackageConfig.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ModelPackageConfig.java
@@ -160,7 +160,7 @@ public ModelPackageConfig(StreamInput in) throws IOException {
         this.inferenceConfigSource = in.readMap();
         this.metadata = in.readMap();
         this.modelType = in.readOptionalString();
-        this.tags = in.readOptionalList(StreamInput::readString);
+        this.tags = in.readOptionalCollectionAsList(StreamInput::readString);
         this.vocabularyFile = in.readOptionalString();
     }
@@ -266,7 +266,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeGenericMap(inferenceConfigSource);
         out.writeGenericMap(metadata);
         out.writeOptionalString(modelType);
-        out.writeOptionalCollection(tags, StreamOutput::writeString);
+        out.writeOptionalStringCollection(tags);
         out.writeOptionalString(vocabularyFile);
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NerConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NerConfig.java
index 7e46452a7abb3..e7f3a66b6748f 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NerConfig.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NerConfig.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.ml.inference.trainedmodel;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.core.Nullable;
@@ -119,7 +120,7 @@ public NerConfig(
     public NerConfig(StreamInput in) throws IOException {
         vocabularyConfig = new VocabularyConfig(in);
         tokenization = in.readNamedWriteable(Tokenization.class);
-        classificationLabels = in.readStringList();
+        classificationLabels = in.readStringCollectionAsList();
         resultsField = in.readOptionalString();
     }
@@ -163,7 +164,7 @@ public MlConfigVersion getMinimalSupportedMlConfigVersion() {
     @Override
     public TransportVersion getMinimalSupportedTransportVersion() {
-        return TransportVersion.V_8_0_0;
+        return TransportVersions.V_8_0_0;
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NerConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NerConfigUpdate.java
index 8a685e46733ab..884ecb39df448 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NerConfigUpdate.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NerConfigUpdate.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.ml.inference.trainedmodel;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xcontent.ObjectParser;
@@ -147,7 +148,7 @@ public int hashCode() {
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_8_0_0;
+        return TransportVersions.V_8_0_0;
     }

     public static class Builder implements InferenceConfigUpdate.Builder {
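ModelPackageConfig and NerConfig above show the remaining string-list renames: readStringList becomes readStringCollectionAsList, and the nullable variants pair readOptionalCollectionAsList with writeOptionalStringCollection. A minimal sketch of both together; the ExamplePackageConfig holder is hypothetical, the calls are the ones in the hunks above.

import java.io.IOException;
import java.util.List;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

// Hypothetical package metadata with one required and one nullable string list.
public class ExamplePackageConfig implements Writeable {

    private final List<String> labels; // never null
    private final List<String> tags;   // may be null

    public ExamplePackageConfig(StreamInput in) throws IOException {
        this.labels = in.readStringCollectionAsList();
        this.tags = in.readOptionalCollectionAsList(StreamInput::readString);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeStringCollection(labels);
        // The optional variant writes a presence flag first, so null round-trips as null.
        out.writeOptionalStringCollection(tags);
    }
}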
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NlpConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NlpConfigUpdate.java
index fc9b1bee1a4be..92e44edcd1259 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NlpConfigUpdate.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/NlpConfigUpdate.java
@@ -7,7 +7,7 @@
 package org.elasticsearch.xpack.core.ml.inference.trainedmodel;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.core.Nullable;
@@ -82,7 +82,7 @@ public NlpConfigUpdate(@Nullable TokenizationUpdate tokenizationUpdate) {
     }

     public NlpConfigUpdate(StreamInput in) throws IOException {
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) {
             tokenizationUpdate = in.readOptionalNamedWriteable(TokenizationUpdate.class);
         } else {
             tokenizationUpdate = null;
@@ -91,7 +91,7 @@ public NlpConfigUpdate(StreamInput in) throws IOException {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) {
             out.writeOptionalNamedWriteable(tokenizationUpdate);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/PassThroughConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/PassThroughConfig.java
index b8e953da3dbb3..74ca76779d4b2 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/PassThroughConfig.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/PassThroughConfig.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.ml.inference.trainedmodel;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.core.Nullable;
@@ -126,7 +127,7 @@ public MlConfigVersion getMinimalSupportedMlConfigVersion() {
     @Override
     public TransportVersion getMinimalSupportedTransportVersion() {
-        return TransportVersion.V_8_0_0;
+        return TransportVersions.V_8_0_0;
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/PassThroughConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/PassThroughConfigUpdate.java
index 736cfb8644613..874f82dc019ca 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/PassThroughConfigUpdate.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/PassThroughConfigUpdate.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.ml.inference.trainedmodel;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xcontent.ObjectParser;
@@ -149,7 +150,7 @@ public int hashCode() {
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_8_0_0;
+        return TransportVersions.V_8_0_0;
     }

     public static class Builder implements InferenceConfigUpdate.Builder {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfig.java
index 311f3f9c1381a..7572d757f2b5f 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfig.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfig.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.ml.inference.trainedmodel;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.core.Nullable;
@@ -194,7 +195,7 @@ public MlConfigVersion getMinimalSupportedMlConfigVersion() {
     @Override
     public TransportVersion getMinimalSupportedTransportVersion() {
-        return TransportVersion.V_8_3_0;
+        return TransportVersions.V_8_3_0;
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfigUpdate.java
index 6e980564ba27a..40657544a14d5 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfigUpdate.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/QuestionAnsweringConfigUpdate.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.ml.inference.trainedmodel;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.core.Nullable;
@@ -253,6 +254,6 @@ public QuestionAnsweringConfigUpdate build() {
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_8_3_0;
+        return TransportVersions.V_8_3_0;
     }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/RegressionConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/RegressionConfig.java
index 48d1866ec4b9a..04365d5c7ec1c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/RegressionConfig.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/RegressionConfig.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.core.ml.inference.trainedmodel;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xcontent.ObjectParser;
@@ -22,7 +23,7 @@ public class RegressionConfig implements LenientlyParsedInferenceConfig, Strictl
     public static final ParseField NAME = new ParseField("regression");

     private static final MlConfigVersion MIN_SUPPORTED_VERSION = MlConfigVersion.V_7_6_0;
-    private static final TransportVersion MIN_SUPPORTED_TRANSPORT_VERSION = TransportVersion.V_7_6_0;
+    private static final TransportVersion MIN_SUPPORTED_TRANSPORT_VERSION = TransportVersions.V_7_6_0;
     public static final ParseField RESULTS_FIELD = new ParseField("results_field");
final ParseField NUM_TOP_FEATURE_IMPORTANCE_VALUES = new ParseField("num_top_feature_importance_values"); @@ -142,7 +143,7 @@ public MlConfigVersion getMinimalSupportedMlConfigVersion() { @Override public TransportVersion getMinimalSupportedTransportVersion() { - return requestingImportance() ? TransportVersion.V_7_7_0 : MIN_SUPPORTED_TRANSPORT_VERSION; + return requestingImportance() ? TransportVersions.V_7_7_0 : MIN_SUPPORTED_TRANSPORT_VERSION; } public static Builder builder() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/RegressionConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/RegressionConfigUpdate.java index 4c187ef28416b..a678806181ef8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/RegressionConfigUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/RegressionConfigUpdate.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ObjectParser; @@ -113,7 +114,7 @@ public String getName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_8_0; + return TransportVersions.V_7_8_0; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ResultsFieldUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ResultsFieldUpdate.java index 514c48e6c42fa..fe1fb9844610d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ResultsFieldUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ResultsFieldUpdate.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xpack.core.ml.utils.ExceptionsHelper; @@ -71,7 +72,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_9_0; + return TransportVersions.V_7_9_0; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextClassificationConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextClassificationConfig.java index a7e06557319f5..ab50f26636fc4 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextClassificationConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextClassificationConfig.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -94,7 +95,7 @@ public TextClassificationConfig( public 
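
The hunks above are representative of the whole change set: the known-release constants (V_7_6_0, V_8_3_0 and friends) move off TransportVersion onto the new TransportVersions holder class, while TransportVersion itself remains the value type that methods accept and return. A minimal sketch of the resulting shape, using a hypothetical ExampleConfig rather than any class in this diff:

```java
import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions;

class ExampleConfig {
    // Was: return TransportVersion.V_7_8_0; only the constant's home moved,
    // the TransportVersion return type is unchanged.
    public TransportVersion getMinimalSupportedVersion() {
        return TransportVersions.V_7_8_0;
    }
}
```
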
TextClassificationConfig(StreamInput in) throws IOException { vocabularyConfig = new VocabularyConfig(in); tokenization = in.readNamedWriteable(Tokenization.class); - classificationLabels = in.readStringList(); + classificationLabels = in.readStringCollectionAsList(); numTopClasses = in.readInt(); resultsField = in.readOptionalString(); } @@ -139,7 +140,7 @@ public MlConfigVersion getMinimalSupportedMlConfigVersion() { @Override public TransportVersion getMinimalSupportedTransportVersion() { - return TransportVersion.V_8_0_0; + return TransportVersions.V_8_0_0; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextClassificationConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextClassificationConfigUpdate.java index 584331b6d6cb2..460a3a685d534 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextClassificationConfigUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextClassificationConfigUpdate.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ObjectParser; @@ -82,7 +83,7 @@ public TextClassificationConfigUpdate( public TextClassificationConfigUpdate(StreamInput in) throws IOException { super(in); - classificationLabels = in.readOptionalStringList(); + classificationLabels = in.readOptionalStringCollectionAsList(); numTopClasses = in.readOptionalVInt(); resultsField = in.readOptionalString(); } @@ -99,7 +100,7 @@ public String getName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_0_0; + return TransportVersions.V_8_0_0; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfig.java index da67c26df0968..518b9eb62d793 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfig.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -102,7 +103,7 @@ public TextEmbeddingConfig(StreamInput in) throws IOException { vocabularyConfig = new VocabularyConfig(in); tokenization = in.readNamedWriteable(Tokenization.class); resultsField = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { embeddingSize = in.readOptionalVInt(); } else { embeddingSize = null; @@ -134,7 +135,7 @@ public void writeTo(StreamOutput out) throws IOException { vocabularyConfig.writeTo(out); out.writeNamedWriteable(tokenization); out.writeOptionalString(resultsField); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if 
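
The TextClassificationConfig and TextClassificationConfigUpdate hunks above also pick up the renamed string-list readers on StreamInput. As far as these hunks show, the wire format is untouched; only the method names change. A sketch with a hypothetical holder class (writeOptionalStringCollection is assumed to be the matching StreamOutput writer; it does not appear in these hunks):

```java
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.List;

class LabelsExample {
    final List<String> labels;         // always present on the wire
    final List<String> optionalLabels; // may be null

    LabelsExample(StreamInput in) throws IOException {
        labels = in.readStringCollectionAsList();                 // was: readStringList()
        optionalLabels = in.readOptionalStringCollectionAsList(); // was: readOptionalStringList()
    }

    void writeTo(StreamOutput out) throws IOException {
        out.writeStringCollection(labels);
        out.writeOptionalStringCollection(optionalLabels);
    }
}
```
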
(out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeOptionalVInt(embeddingSize); } } @@ -151,7 +152,7 @@ public MlConfigVersion getMinimalSupportedMlConfigVersion() { @Override public TransportVersion getMinimalSupportedTransportVersion() { - return TransportVersion.V_8_0_0; + return TransportVersions.V_8_0_0; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfigUpdate.java index 68d89718ffa11..6acd2d209a875 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfigUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfigUpdate.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.ObjectParser; @@ -100,7 +101,7 @@ public String getName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_0_0; + return TransportVersions.V_8_0_0; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfig.java index 5631db5a2d7eb..d8315bec14153 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfig.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -132,7 +133,7 @@ public MlConfigVersion getMinimalSupportedMlConfigVersion() { @Override public TransportVersion getMinimalSupportedTransportVersion() { - return TransportVersion.V_8_7_0; + return TransportVersions.V_8_7_0; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfigUpdate.java index 5102e509aa10a..181cadbaf7168 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfigUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfigUpdate.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -161,7 +162,7 @@ public String toString() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_7_0; + return TransportVersions.V_8_7_0; } public 
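
The TextEmbeddingConfig hunks around this point show the standard gate for a field introduced in a later release: reader and writer both check the stream's transport version against the same constant, so a mixed-version cluster agrees on the wire layout. A reduced sketch, where the embeddingSize field stands in for any optional late addition:

```java
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

class GatedFieldExample {
    final Integer embeddingSize;

    GatedFieldExample(StreamInput in) throws IOException {
        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
            embeddingSize = in.readOptionalVInt();
        } else {
            embeddingSize = null; // older senders never wrote this field
        }
    }

    void writeTo(StreamOutput out) throws IOException {
        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
            out.writeOptionalVInt(embeddingSize);
        }
        // nothing is written to pre-8.8.0 receivers, matching what they expect
    }
}
```
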
static class Builder implements InferenceConfigUpdate.Builder { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfig.java index 96b2f9e54fc5d..5511df03e6f36 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfig.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -155,7 +156,7 @@ public MlConfigVersion getMinimalSupportedMlConfigVersion() { @Override public TransportVersion getMinimalSupportedTransportVersion() { - return TransportVersion.V_8_5_0; + return TransportVersions.V_8_5_0; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdate.java index bd4788393a40c..c7afacc07b944 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdate.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -211,6 +212,6 @@ public TextSimilarityConfigUpdate build() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_5_0; + return TransportVersions.V_8_5_0; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/Tokenization.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/Tokenization.java index 444726d6a9479..ef437e0201510 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/Tokenization.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/Tokenization.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -125,7 +125,7 @@ public Tokenization(StreamInput in) throws IOException { this.withSpecialTokens = in.readBoolean(); this.maxSequenceLength = in.readVInt(); this.truncate = in.readEnum(Truncate.class); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) { this.span = in.readInt(); } else { this.span = UNSET_SPAN_VALUE; @@ -138,7 +138,7 @@ public void writeTo(StreamOutput out) throws IOException { 
out.writeBoolean(withSpecialTokens); out.writeVInt(maxSequenceLength); out.writeEnum(truncate); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) { out.writeInt(span); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TrainedModel.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TrainedModel.java index ede41feeb8de7..42e0bdac6e83a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TrainedModel.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TrainedModel.java @@ -8,6 +8,7 @@ import org.apache.lucene.util.Accountable; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.xpack.core.ml.utils.NamedXContentObject; @@ -33,6 +34,6 @@ public interface TrainedModel extends NamedXContentObject, NamedWriteable, Accou long estimatedNumOperations(); default TransportVersion getMinimalCompatibilityVersion() { - return TransportVersion.V_7_6_0; + return TransportVersions.V_7_6_0; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfig.java index bd9507c033af0..ba4c130b987d2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfig.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -151,10 +152,10 @@ public ZeroShotClassificationConfig( public ZeroShotClassificationConfig(StreamInput in) throws IOException { vocabularyConfig = new VocabularyConfig(in); tokenization = in.readNamedWriteable(Tokenization.class); - classificationLabels = in.readStringList(); + classificationLabels = in.readStringCollectionAsList(); isMultiLabel = in.readBoolean(); hypothesisTemplate = in.readString(); - labels = in.readOptionalStringList(); + labels = in.readOptionalStringCollectionAsList(); resultsField = in.readOptionalString(); } @@ -204,7 +205,7 @@ public MlConfigVersion getMinimalSupportedMlConfigVersion() { @Override public TransportVersion getMinimalSupportedTransportVersion() { - return TransportVersion.V_8_0_0; + return TransportVersions.V_8_0_0; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdate.java index 02ed163d389b9..47fd75ed6ff42 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdate.java @@ -8,6 +8,7 @@ 
package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -88,7 +89,7 @@ public ZeroShotClassificationConfigUpdate( public ZeroShotClassificationConfigUpdate(StreamInput in) throws IOException { super(in); - labels = in.readOptionalStringList(); + labels = in.readOptionalStringCollectionAsList(); isMultiLabel = in.readOptionalBoolean(); resultsField = in.readOptionalString(); } @@ -244,6 +245,6 @@ public ZeroShotClassificationConfigUpdate build() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_0_0; + return TransportVersions.V_8_0_0; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/Ensemble.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/Ensemble.java index d0b7fd1b1f376..9afad760e1b7f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/Ensemble.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ensemble/Ensemble.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.Accountables; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -114,12 +115,12 @@ public List getModels() { } public Ensemble(StreamInput in) throws IOException { - this.featureNames = in.readImmutableList(StreamInput::readString); - this.models = Collections.unmodifiableList(in.readNamedWriteableList(TrainedModel.class)); + this.featureNames = in.readCollectionAsImmutableList(StreamInput::readString); + this.models = Collections.unmodifiableList(in.readNamedWriteableCollectionAsList(TrainedModel.class)); this.outputAggregator = in.readNamedWriteable(OutputAggregator.class); this.targetType = TargetType.fromStream(in); if (in.readBoolean()) { - this.classificationLabels = in.readStringList(); + this.classificationLabels = in.readStringCollectionAsList(); } else { this.classificationLabels = null; } @@ -143,7 +144,7 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(featureNames); - out.writeNamedWriteableList(models); + out.writeNamedWriteableCollection(models); out.writeNamedWriteable(outputAggregator); targetType.writeTo(out); out.writeBoolean(classificationLabels != null); @@ -285,7 +286,7 @@ public TransportVersion getMinimalCompatibilityVersion() { return models.stream() .map(TrainedModel::getMinimalCompatibilityVersion) .max(TransportVersion::compareTo) - .orElse(TransportVersion.V_7_6_0); + .orElse(TransportVersions.V_7_6_0); } public static class Builder { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/metadata/FeatureImportanceBaseline.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/metadata/FeatureImportanceBaseline.java index 59108d7cb8742..e1e6e2299fcc6 100644 --- 
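
Besides the collection renames, the Ensemble hunk above preserves one non-mechanical detail: the minimal compatibility version is computed from the member models rather than hard-coded. A sketch of that reduction, with SubModel standing in for the TrainedModel interface:

```java
import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions;

import java.util.List;

interface SubModel {
    TransportVersion getMinimalCompatibilityVersion();
}

class EnsembleVersionExample {
    // The ensemble is only as old as its newest member model; an empty list
    // falls back to the 7.6.0 floor, exactly as in the hunk above.
    static TransportVersion minimalCompatibilityVersion(List<SubModel> models) {
        return models.stream()
            .map(SubModel::getMinimalCompatibilityVersion)
            .max(TransportVersion::compareTo)
            .orElse(TransportVersions.V_7_6_0);
    }
}
```
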
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/metadata/FeatureImportanceBaseline.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/metadata/FeatureImportanceBaseline.java @@ -61,7 +61,7 @@ public static FeatureImportanceBaseline fromXContent(XContentParser parser, bool public FeatureImportanceBaseline(StreamInput in) throws IOException { this.baseline = in.readOptionalDouble(); - this.classBaselines = in.readList(ClassBaseline::new); + this.classBaselines = in.readCollectionAsList(ClassBaseline::new); } public FeatureImportanceBaseline(Double baseline, List classBaselines) { @@ -72,7 +72,7 @@ public FeatureImportanceBaseline(Double baseline, List classBasel @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalDouble(baseline); - out.writeList(classBaselines); + out.writeCollection(classBaselines); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/metadata/TotalFeatureImportance.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/metadata/TotalFeatureImportance.java index fbe2d16211183..75139fb5fe6a9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/metadata/TotalFeatureImportance.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/metadata/TotalFeatureImportance.java @@ -73,7 +73,7 @@ public static TotalFeatureImportance fromXContent(XContentParser parser, boolean public TotalFeatureImportance(StreamInput in) throws IOException { this.featureName = in.readString(); this.importance = in.readOptionalWriteable(Importance::new); - this.classImportances = in.readList(ClassImportance::new); + this.classImportances = in.readCollectionAsList(ClassImportance::new); } TotalFeatureImportance(String featureName, @Nullable Importance importance, @Nullable List classImportances) { @@ -86,7 +86,7 @@ public TotalFeatureImportance(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeString(featureName); out.writeOptionalWriteable(importance); - out.writeList(classImportances); + out.writeCollection(classImportances); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/metadata/TrainedModelMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/metadata/TrainedModelMetadata.java index ee8f807b00010..e07f99ae90c4a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/metadata/TrainedModelMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/metadata/TrainedModelMetadata.java @@ -86,9 +86,9 @@ public static String modelId(String docId) { public TrainedModelMetadata(StreamInput in) throws IOException { this.modelId = in.readString(); - this.totalFeatureImportances = in.readList(TotalFeatureImportance::new); + this.totalFeatureImportances = in.readCollectionAsList(TotalFeatureImportance::new); this.featureImportanceBaselines = in.readOptionalWriteable(FeatureImportanceBaseline::new); - this.hyperparameters = in.readList(Hyperparameters::new); + this.hyperparameters = in.readCollectionAsList(Hyperparameters::new); } public TrainedModelMetadata( @@ -142,9 +142,9 @@ public int hashCode() { @Override public void writeTo(StreamOutput 
out) throws IOException { out.writeString(modelId); - out.writeList(totalFeatureImportances); + out.writeCollection(totalFeatureImportances); out.writeOptionalWriteable(featureImportanceBaselines); - out.writeList(hyperparameters); + out.writeCollection(hyperparameters); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/Tree.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/Tree.java index eec3107f5bfa6..458cbb2b602f5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/Tree.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/tree/Tree.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.Accountables; import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -81,11 +82,11 @@ public static Tree fromXContentLenient(XContentParser parser) { } public Tree(StreamInput in) throws IOException { - this.featureNames = in.readImmutableList(StreamInput::readString); - this.nodes = in.readImmutableList(TreeNode::new); + this.featureNames = in.readCollectionAsImmutableList(StreamInput::readString); + this.nodes = in.readCollectionAsImmutableList(TreeNode::new); this.targetType = TargetType.fromStream(in); if (in.readBoolean()) { - this.classificationLabels = in.readImmutableList(StreamInput::readString); + this.classificationLabels = in.readCollectionAsImmutableList(StreamInput::readString); } else { this.classificationLabels = null; } @@ -289,9 +290,9 @@ public Collection getChildResources() { @Override public TransportVersion getMinimalCompatibilityVersion() { if (nodes.stream().filter(TreeNode::isLeaf).anyMatch(t -> t.getLeafValue().length > 1)) { - return TransportVersion.V_7_7_0; + return TransportVersions.V_7_7_0; } - return TransportVersion.V_7_6_0; + return TransportVersions.V_7_6_0; } public static class Builder { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java index ca648cfc18fe8..ea893a69fa392 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/AnalysisConfig.java @@ -174,13 +174,13 @@ private AnalysisConfig( public AnalysisConfig(StreamInput in) throws IOException { bucketSpan = in.readTimeValue(); categorizationFieldName = in.readOptionalString(); - categorizationFilters = in.readBoolean() ? in.readImmutableList(StreamInput::readString) : null; + categorizationFilters = in.readBoolean() ? 
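
The readList/writeList renames in the TrainedModelMetadata and TotalFeatureImportance hunks above are the most common shape in this diff. A round-trip sketch with a hypothetical Item writeable; assuming these are pure renames (none of the hunks adds a version gate), the serialized bytes stay the same:

```java
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;
import java.util.List;

record Item(String name) implements Writeable {
    Item(StreamInput in) throws IOException {
        this(in.readString());
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(name);
    }
}

class ItemListExample {
    final List<Item> items;

    ItemListExample(StreamInput in) throws IOException {
        items = in.readCollectionAsList(Item::new); // was: in.readList(Item::new)
    }

    void writeTo(StreamOutput out) throws IOException {
        out.writeCollection(items); // was: out.writeList(items)
    }
}
```
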
in.readCollectionAsImmutableList(StreamInput::readString) : null; categorizationAnalyzerConfig = in.readOptionalWriteable(CategorizationAnalyzerConfig::new); perPartitionCategorizationConfig = new PerPartitionCategorizationConfig(in); latency = in.readOptionalTimeValue(); summaryCountFieldName = in.readOptionalString(); - detectors = in.readImmutableList(Detector::new); - influencers = in.readImmutableList(StreamInput::readString); + detectors = in.readCollectionAsImmutableList(Detector::new); + influencers = in.readCollectionAsImmutableList(StreamInput::readString); multivariateByFields = in.readOptionalBoolean(); modelPruneWindow = in.readOptionalTimeValue(); @@ -200,7 +200,7 @@ public void writeTo(StreamOutput out) throws IOException { perPartitionCategorizationConfig.writeTo(out); out.writeOptionalTimeValue(latency); out.writeOptionalString(summaryCountFieldName); - out.writeList(detectors); + out.writeCollection(detectors); out.writeStringCollection(influencers); out.writeOptionalBoolean(multivariateByFields); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/CategorizationAnalyzerConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/CategorizationAnalyzerConfig.java index fca4979261dd0..ac934a71ec311 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/CategorizationAnalyzerConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/CategorizationAnalyzerConfig.java @@ -229,17 +229,17 @@ private CategorizationAnalyzerConfig( public CategorizationAnalyzerConfig(StreamInput in) throws IOException { analyzer = in.readOptionalString(); - charFilters = in.readList(NameOrDefinition::new); + charFilters = in.readCollectionAsList(NameOrDefinition::new); tokenizer = in.readOptionalWriteable(NameOrDefinition::new); - tokenFilters = in.readList(NameOrDefinition::new); + tokenFilters = in.readCollectionAsList(NameOrDefinition::new); } @Override public void writeTo(StreamOutput out) throws IOException { out.writeOptionalString(analyzer); - out.writeList(charFilters); + out.writeCollection(charFilters); out.writeOptionalWriteable(tokenizer); - out.writeList(tokenFilters); + out.writeCollection(tokenFilters); } public String getAnalyzer() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DataDescription.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DataDescription.java index 4cd8aae3561d4..65a4524b1bcff 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DataDescription.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DataDescription.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.ml.job.config; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -131,12 +131,12 @@ public DataDescription(String timeFieldName, String timeFormat) { } public DataDescription(StreamInput in) throws IOException { - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { DataFormat.readFromStream(in); } timeFieldName = in.readString(); timeFormat = in.readString(); - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if 
(in.getTransportVersion().before(TransportVersions.V_8_0_0)) { // fieldDelimiter if (in.readBoolean()) { in.read(); @@ -150,12 +150,12 @@ public DataDescription(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { DataFormat.XCONTENT.writeTo(out); } out.writeString(timeFieldName); out.writeString(timeFormat); - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { // fieldDelimiter out.writeBoolean(false); // quoteCharacter diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java index ddfccfc7f62e0..ff2921d755b40 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/DetectionRule.java @@ -62,14 +62,14 @@ private DetectionRule(EnumSet actions, RuleScope scope, List value.writeTo(out1)); + out.writeMap(scope, StreamOutput::writeWriteable); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java index 06def9204686e..883c94093a2c5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/process/autodetect/output/FlushAcknowledgement.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.ml.job.process.autodetect.output; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -66,7 +66,7 @@ public FlushAcknowledgement(String id, Instant lastFinalizedBucketEnd, Boolean r public FlushAcknowledgement(StreamInput in) throws IOException { id = in.readString(); lastFinalizedBucketEnd = in.readOptionalInstant(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_012)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { refreshRequired = in.readBoolean(); } else { refreshRequired = true; @@ -77,7 +77,7 @@ public FlushAcknowledgement(StreamInput in) throws IOException { public void writeTo(StreamOutput out) throws IOException { out.writeString(id); out.writeOptionalInstant(lastFinalizedBucketEnd); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_012)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeBoolean(refreshRequired); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyCause.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyCause.java index 685dc93cf962c..bef1c0799ba8f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyCause.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyCause.java @@ -126,7 +126,7 @@ public 
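
The writeMap hunks in this stretch (DetectionRule above, CountAccumulator and RollupJob further down) all collapse the old two-writer form into newer overloads: string keys no longer need an explicit StreamOutput::writeString, and the hand-rolled (out1, value) -> value.writeTo(out1) lambda becomes StreamOutput::writeWriteable. A sketch with hypothetical fields:

```java
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;
import java.util.Map;

class MapWriteExample {
    Map<String, String> headers;
    Map<String, Writeable> lookup;

    void writeTo(StreamOutput out) throws IOException {
        // was: out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString);
        out.writeMap(headers, StreamOutput::writeString);
        // was: out.writeMap(lookup, StreamOutput::writeString, (o, value) -> value.writeTo(o));
        out.writeMap(lookup, StreamOutput::writeWriteable);
    }
}
```
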
AnomalyCause(StreamInput in) throws IOException { overFieldName = in.readOptionalString(); overFieldValue = in.readOptionalString(); if (in.readBoolean()) { - influencers = in.readList(Influence::new); + influencers = in.readCollectionAsList(Influence::new); } geoResults = in.readOptionalWriteable(GeoResults::new); } @@ -157,7 +157,7 @@ public void writeTo(StreamOutput out) throws IOException { boolean hasInfluencers = influencers != null; out.writeBoolean(hasInfluencers); if (hasInfluencers) { - out.writeList(influencers); + out.writeCollection(influencers); } out.writeOptionalWriteable(geoResults); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java index 2454a169685e0..54f77655e2822 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyRecord.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.ml.job.results; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -207,17 +207,17 @@ public AnomalyRecord(StreamInput in) throws IOException { } isInterim = in.readBoolean(); if (in.readBoolean()) { - causes = in.readList(AnomalyCause::new); + causes = in.readCollectionAsList(AnomalyCause::new); } recordScore = in.readDouble(); initialRecordScore = in.readDouble(); timestamp = new Date(in.readLong()); bucketSpan = in.readLong(); if (in.readBoolean()) { - influences = in.readList(Influence::new); + influences = in.readCollectionAsList(Influence::new); } geoResults = in.readOptionalWriteable(GeoResults::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { anomalyScoreExplanation = in.readOptionalWriteable(AnomalyScoreExplanation::new); } } @@ -252,7 +252,7 @@ public void writeTo(StreamOutput out) throws IOException { boolean hasCauses = causes != null; out.writeBoolean(hasCauses); if (hasCauses) { - out.writeList(causes); + out.writeCollection(causes); } out.writeDouble(recordScore); out.writeDouble(initialRecordScore); @@ -261,10 +261,10 @@ public void writeTo(StreamOutput out) throws IOException { boolean hasInfluencers = influences != null; out.writeBoolean(hasInfluencers); if (hasInfluencers) { - out.writeList(influences); + out.writeCollection(influences); } out.writeOptionalWriteable(geoResults); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { out.writeOptionalWriteable(anomalyScoreExplanation); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyScoreExplanation.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyScoreExplanation.java index 17c0da6a7020f..cb19f7703ed42 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyScoreExplanation.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/AnomalyScoreExplanation.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.ml.job.results; -import org.elasticsearch.TransportVersion; +import 
org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -88,10 +88,10 @@ public AnomalyScoreExplanation(StreamInput in) throws IOException { this.upperConfidenceBound = in.readOptionalDouble(); this.highVariancePenalty = in.readOptionalBoolean(); this.incompleteBucketPenalty = in.readOptionalBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { this.multimodalDistribution = in.readOptionalBoolean(); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { this.byFieldFirstOccurrence = in.readOptionalBoolean(); this.byFieldRelativeRarity = in.readOptionalDouble(); } @@ -109,10 +109,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalDouble(upperConfidenceBound); out.writeOptionalBoolean(highVariancePenalty); out.writeOptionalBoolean(incompleteBucketPenalty); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { out.writeOptionalBoolean(multimodalDistribution); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeOptionalBoolean(byFieldFirstOccurrence); out.writeOptionalDouble(byFieldRelativeRarity); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java index 92600df4f0569..b4798b404a434 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Bucket.java @@ -143,12 +143,12 @@ public Bucket(StreamInput in) throws IOException { anomalyScore = in.readDouble(); bucketSpan = in.readLong(); initialAnomalyScore = in.readDouble(); - records = in.readList(AnomalyRecord::new); + records = in.readCollectionAsList(AnomalyRecord::new); eventCount = in.readLong(); isInterim = in.readBoolean(); - bucketInfluencers = in.readList(BucketInfluencer::new); + bucketInfluencers = in.readCollectionAsList(BucketInfluencer::new); processingTimeMs = in.readLong(); - scheduledEvents = in.readStringList(); + scheduledEvents = in.readStringCollectionAsList(); if (scheduledEvents.isEmpty()) { scheduledEvents = Collections.emptyList(); } @@ -161,10 +161,10 @@ public void writeTo(StreamOutput out) throws IOException { out.writeDouble(anomalyScore); out.writeLong(bucketSpan); out.writeDouble(initialAnomalyScore); - out.writeList(records); + out.writeCollection(records); out.writeLong(eventCount); out.writeBoolean(isInterim); - out.writeList(bucketInfluencers); + out.writeCollection(bucketInfluencers); out.writeLong(processingTimeMs); out.writeStringCollection(scheduledEvents); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java index bc258167140c7..7d387a223b050 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/CategoryDefinition.java @@ -97,7 +97,7 @@ public CategoryDefinition(StreamInput in) throws IOException { terms = in.readString(); regex = in.readString(); maxMatchingLength = in.readLong(); - examples = new TreeSet<>(in.readStringList()); + examples = new TreeSet<>(in.readStringCollectionAsList()); grokPattern = in.readOptionalString(); this.preferredToCategories = in.readVLongArray(); this.numMatches = in.readVLong(); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ForecastRequestStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ForecastRequestStats.java index 3c0ceb64b5111..620618becbb3e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ForecastRequestStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/ForecastRequestStats.java @@ -152,7 +152,7 @@ public ForecastRequestStats(StreamInput in) throws IOException { forecastId = in.readString(); recordCount = in.readLong(); if (in.readBoolean()) { - messages = in.readStringList(); + messages = in.readStringCollectionAsList(); } else { messages = null; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influence.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influence.java index 7492f4d6d3aa3..ef90d070520ec 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influence.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/Influence.java @@ -64,7 +64,7 @@ public Influence(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(field); - out.writeStringArray(fieldValues.toArray(new String[fieldValues.size()])); + out.writeStringCollection(fieldValues); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/OverallBucket.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/OverallBucket.java index a7d46cfd981e4..c04a61951ad99 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/OverallBucket.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/results/OverallBucket.java @@ -55,7 +55,7 @@ public OverallBucket(StreamInput in) throws IOException { timestamp = new Date(in.readLong()); bucketSpan = in.readLong(); overallScore = in.readDouble(); - jobs = in.readList(JobInfo::new); + jobs = in.readCollectionAsList(JobInfo::new); isInterim = in.readBoolean(); } @@ -64,7 +64,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(timestamp.getTime()); out.writeLong(bucketSpan); out.writeDouble(overallScore); - out.writeList(jobs); + out.writeCollection(jobs); out.writeBoolean(isInterim); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/snapshot/upgrade/SnapshotUpgradeTaskParams.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/snapshot/upgrade/SnapshotUpgradeTaskParams.java index a0c707edf3cd5..c159f997798a9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/snapshot/upgrade/SnapshotUpgradeTaskParams.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/snapshot/upgrade/SnapshotUpgradeTaskParams.java @@ -8,6 +8,7 @@ package 
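
The Influence hunk above is a small cleanup in the same family: writeStringCollection replaces the manual copy through toArray. Assuming, as the unguarded replacement implies, that both produce the same length-prefixed layout, the change is wire-compatible. Sketch:

```java
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.List;

class InfluenceWriteExample {
    List<String> fieldValues;

    void writeTo(StreamOutput out) throws IOException {
        // was: out.writeStringArray(fieldValues.toArray(new String[fieldValues.size()]));
        out.writeStringCollection(fieldValues); // no intermediate array needed
    }
}
```
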
org.elasticsearch.xpack.core.ml.job.snapshot.upgrade; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.persistent.PersistentTaskParams; @@ -68,7 +69,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_11_0; + return TransportVersions.V_7_11_0; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/CountAccumulator.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/CountAccumulator.java index 60e4f748df217..c085b37a20c60 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/CountAccumulator.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/stats/CountAccumulator.java @@ -63,7 +63,7 @@ public static CountAccumulator fromTermsAggregation(StringTerms termsAggregation } public void writeTo(StreamOutput out) throws IOException { - out.writeMap(counts, StreamOutput::writeString, StreamOutput::writeLong); + out.writeMap(counts, StreamOutput::writeLong); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java index 67615b6b79c65..546c139766248 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/MonitoringFeatureSetUsage.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.monitoring; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -39,7 +40,7 @@ public MonitoringFeatureSetUsage(boolean collectionEnabled, Map @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_0_0; + return TransportVersions.V_7_0_0; } public Map getExporters() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java index 11e956c3ae780..7dbb1e8b218b7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringBulkRequest.java @@ -38,7 +38,7 @@ public MonitoringBulkRequest() {} public MonitoringBulkRequest(StreamInput in) throws IOException { super(in); - docs.addAll(in.readList(MonitoringBulkDoc::new)); + docs.addAll(in.readCollectionAsList(MonitoringBulkDoc::new)); } /** @@ -120,6 +120,6 @@ public MonitoringBulkRequest add( @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeList(docs); + out.writeCollection(docs); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringMigrateAlertsResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringMigrateAlertsResponse.java index 1e88ee7624a16..72b307dc6d782 100644 --- 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringMigrateAlertsResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/monitoring/action/MonitoringMigrateAlertsResponse.java @@ -31,12 +31,12 @@ public MonitoringMigrateAlertsResponse(List exporters) public MonitoringMigrateAlertsResponse(StreamInput in) throws IOException { super(in); - this.exporters = in.readList(ExporterMigrationResult::new); + this.exporters = in.readCollectionAsList(ExporterMigrationResult::new); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(exporters); + out.writeCollection(exporters); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupFeatureSetUsage.java index 671349362dd00..fd5cf1c41b466 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/RollupFeatureSetUsage.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.rollup; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.xpack.core.XPackFeatureSet; import org.elasticsearch.xpack.core.XPackField; @@ -25,7 +26,7 @@ public RollupFeatureSetUsage() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_0_0; + return TransportVersions.V_7_0_0; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java index 86e3720468c79..2b9fa95f2bfd1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupCapsAction.java @@ -125,7 +125,7 @@ public Map getJobs() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(jobs, StreamOutput::writeString, (out1, value) -> value.writeTo(out1)); + out.writeMap(jobs, StreamOutput::writeWriteable); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java index cd5be88bf7b7c..8cb0a91f7c3f0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupIndexCapsAction.java @@ -154,7 +154,7 @@ public Map getJobs() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(jobs, StreamOutput::writeString, (out1, value) -> value.writeTo(out1)); + out.writeMap(jobs, StreamOutput::writeWriteable); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java index a71142cf718a1..a06ef484544d7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java +++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/GetRollupJobsAction.java @@ -143,13 +143,13 @@ public Response(List<JobWrapper> jobs, List<TaskOperationFailure> taskFailures, public Response(StreamInput in) throws IOException { super(in); - jobs = in.readList(JobWrapper::new); + jobs = in.readCollectionAsList(JobWrapper::new); } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeList(jobs); + out.writeCollection(jobs); } public List<JobWrapper> getJobs() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollableIndexCaps.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollableIndexCaps.java index 411f8b0db497e..4fc3e44a9a134 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollableIndexCaps.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollableIndexCaps.java @@ -38,7 +38,7 @@ public RollableIndexCaps(String indexName, List<RollupJobCaps> caps) { public RollableIndexCaps(StreamInput in) throws IOException { this.indexName = in.readString(); - this.jobCaps = in.readList(RollupJobCaps::new); + this.jobCaps = in.readCollectionAsList(RollupJobCaps::new); } public String getIndexName() { @@ -52,7 +52,7 @@ public List<RollupJobCaps> getJobCaps() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(indexName); - out.writeList(jobCaps); + out.writeCollection(jobCaps); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java index ceccfd4028fae..040f7e1637a50 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/action/RollupJobCaps.java @@ -94,7 +94,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(jobID); out.writeString(rollupIndex); out.writeString(indexPattern); - out.writeMap(fieldCapLookup, StreamOutput::writeString, (o, value) -> value.writeTo(o)); + out.writeMap(fieldCapLookup, StreamOutput::writeWriteable); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java index 48cd2f0b1c204..64c925adc81ce 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/MetricConfig.java @@ -89,7 +89,7 @@ public MetricConfig(final String field, final List<String> metrics) { public MetricConfig(final StreamInput in) throws IOException { field = in.readString(); - metrics = in.readStringList(); + metrics = in.readStringCollectionAsList(); } /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJob.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJob.java index 683f39be9cc98..3918f96ae2c56 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJob.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJob.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.rollup.job; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import
org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.io.stream.StreamInput; @@ -86,7 +87,7 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { config.writeTo(out); - out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(headers, StreamOutput::writeString); } static Diff<RollupJob> readJobDiffFrom(StreamInput in) throws IOException { @@ -119,6 +120,6 @@ public int hashCode() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.MINIMUM_COMPATIBLE; + return TransportVersions.MINIMUM_COMPATIBLE; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java index 73c480353511b..0a1c1d0989c32 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobConfig.java @@ -148,7 +148,7 @@ public RollupJobConfig(final StreamInput in) throws IOException { rollupIndex = in.readString(); cron = in.readString(); groupConfig = in.readOptionalWriteable(GroupConfig::new); - metricsConfig = in.readList(MetricConfig::new); + metricsConfig = in.readCollectionAsList(MetricConfig::new); timeout = in.readTimeValue(); pageSize = in.readInt(); indices = Strings.splitStringByCommaToArray(indexPattern); @@ -252,7 +252,7 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeString(rollupIndex); out.writeString(cron); out.writeOptionalWriteable(groupConfig); - out.writeList(metricsConfig); + out.writeCollection(metricsConfig); out.writeTimeValue(timeout); out.writeInt(pageSize); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java index 55e3ea54cea02..19c718073d702 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/rollup/job/RollupJobStatus.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.rollup.job; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -75,7 +75,7 @@ public RollupJobStatus(IndexerState state, @Nullable Map<String, Object> positio public RollupJobStatus(StreamInput in) throws IOException { state = IndexerState.fromStream(in); currentPosition = in.readBoolean() ? new TreeMap<>(in.readMap()) : null; - if (in.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) { // 7.x nodes serialize `upgradedDocumentID` flag. We don't need it anymore, but // we need to pull it off the stream // This can go away completely in 9.0 @@ -122,7 +122,7 @@ public void writeTo(StreamOutput out) throws IOException { if (currentPosition != null) { out.writeGenericMap(currentPosition); } - if (out.getTransportVersion().before(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) { // 7.x nodes expect a boolean `upgradedDocumentID` flag.
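
The RollupJobStatus hunk above (like DataDescription earlier) shows the before()-gated compatibility shims this PR carries over unchanged: a field that 7.x peers still expect is read and discarded on input, and a placeholder is written on output. A reduced sketch of that pattern:

```java
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

class LegacyFlagExample {
    LegacyFlagExample(StreamInput in) throws IOException {
        if (in.getTransportVersion().before(TransportVersions.V_8_0_0)) {
            in.readBoolean(); // legacy flag from 7.x senders: pull it off the stream, ignore it
        }
    }

    void writeTo(StreamOutput out) throws IOException {
        if (out.getTransportVersion().before(TransportVersions.V_8_0_0)) {
            out.writeBoolean(true); // 7.x receivers still expect the flag
        }
    }
}
```
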
We don't have it anymore, // but we need to tell them we are upgraded in case there is a mixed cluster // This can go away completely in 9.0 diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java index 4d544b6bceb38..7399a8e9c9e31 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/search/action/AsyncStatusResponse.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.search.action; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.search.SearchResponse; import org.elasticsearch.common.io.stream.StreamInput; @@ -136,12 +136,12 @@ public AsyncStatusResponse(StreamInput in) throws IOException { this.skippedShards = in.readVInt(); this.failedShards = in.readVInt(); this.completionStatus = (this.isRunning == false) ? RestStatus.readFrom(in) : null; - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { this.clusters = in.readOptionalWriteable(SearchResponse.Clusters::new); } else { this.clusters = null; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_035)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_035)) { this.completionTimeMillis = in.readOptionalVLong(); } else { this.completionTimeMillis = null; @@ -162,11 +162,11 @@ public void writeTo(StreamOutput out) throws IOException { if (isRunning == false) { RestStatus.writeTo(out, completionStatus); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { // optional since only CCS uses is; it is null for local-only searches out.writeOptionalWriteable(clusters); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_035)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_035)) { out.writeOptionalVLong(completionTimeMillis); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java index eb1017fcd4399..276eedc6408e1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.searchablesnapshots; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.master.MasterNodeRequest; import org.elasticsearch.cluster.metadata.IndexMetadata; @@ -83,7 +84,7 @@ public class MountSearchableSnapshotRequest extends MasterNodeRequest nodes, Lis @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(Node::new); + return in.readCollectionAsList(Node::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws 
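Besides moving constants from TransportVersion to the new TransportVersions holder, the AsyncStatusResponse hunk bumps the gate for the clusters field from V_8_500_010 to V_8_500_020. A minimal sketch of the gating idiom these files share, with a hypothetical class and field name: read the field only when the peer's transport version is new enough, default it otherwise, and mirror the same check on write.

// Hypothetical class sketching version-gated serialization.
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

public class GatedResponse implements Writeable {
    private final Long completionTimeMillis; // nullable, added in a later version

    public GatedResponse(StreamInput in) throws IOException {
        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_035)) {
            this.completionTimeMillis = in.readOptionalVLong();
        } else {
            this.completionTimeMillis = null; // older peers never send it
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_035)) {
            out.writeOptionalVLong(completionTimeMillis); // skipped entirely for older peers
        }
    }
}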
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java
index eb1017fcd4399..276eedc6408e1 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java
@@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.searchablesnapshots;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.support.master.MasterNodeRequest;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
@@ -83,7 +84,7 @@ public class MountSearchableSnapshotRequest extends MasterNodeRequest nodes, Lis
     @Override
     protected List readNodesFrom(StreamInput in) throws IOException {
-        return in.readList(Node::new);
+        return in.readCollectionAsList(Node::new);
     }

     @Override
     protected void writeNodesTo(StreamOutput out, List nodes) throws IOException {
-        out.writeList(nodes);
+        out.writeCollection(nodes);
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/DelegatePkiAuthenticationRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/DelegatePkiAuthenticationRequest.java
index 7996741872fc4..4cf90a039a35c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/DelegatePkiAuthenticationRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/DelegatePkiAuthenticationRequest.java
@@ -74,7 +74,7 @@ public DelegatePkiAuthenticationRequest(StreamInput input) throws IOException {
         super(input);
         try {
             final CertificateFactory certificateFactory = CertificateFactory.getInstance("X.509");
-            certificateChain = input.readImmutableList(in -> {
+            certificateChain = input.readCollectionAsImmutableList(in -> {
                 try (ByteArrayInputStream bis = new ByteArrayInputStream(in.readByteArray())) {
                     return (X509Certificate) certificateFactory.generateCertificate(bis);
                 } catch (CertificateException e) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/DelegatePkiAuthenticationResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/DelegatePkiAuthenticationResponse.java
index bf7a67882ac9b..dd356b8ab41ff 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/DelegatePkiAuthenticationResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/DelegatePkiAuthenticationResponse.java
@@ -7,7 +7,7 @@

 package org.elasticsearch.xpack.core.security.action;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -47,7 +47,7 @@ public DelegatePkiAuthenticationResponse(StreamInput input) throws IOException {
         super(input);
         accessToken = input.readString();
         expiresIn = input.readTimeValue();
-        if (input.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) {
+        if (input.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) {
             authentication = new Authentication(input);
         }
     }
@@ -68,7 +68,7 @@ public Authentication getAuthentication() {
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(accessToken);
         out.writeTimeValue(expiresIn);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) {
             authentication.writeTo(out);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/Grant.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/Grant.java
index 86ea99f183449..f24ddbd86c937 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/Grant.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/Grant.java
@@ -7,7 +7,7 @@

 package org.elasticsearch.xpack.core.security.action;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -41,7 +41,7 @@ public Grant(StreamInput in) throws IOException {
         this.username = in.readOptionalString();
         this.password = in.readOptionalSecureString();
         this.accessToken = in.readOptionalSecureString();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
             this.runAsUsername = in.readOptionalString();
         } else {
             this.runAsUsername = null;
@@ -53,7 +53,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalString(username);
         out.writeOptionalSecureString(password);
         out.writeOptionalSecureString(accessToken);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
            out.writeOptionalString(runAsUsername);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java
index dc6aee050bc23..79c8ef10100e9 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/ApiKey.java
@@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -45,7 +46,7 @@ */
 public final class ApiKey implements ToXContentObject, Writeable {

-    public static final TransportVersion CROSS_CLUSTER_KEY_VERSION = TransportVersion.V_8_500_010;
+    public static final TransportVersion CROSS_CLUSTER_KEY_VERSION = TransportVersions.V_8_500_020;

     public enum Type {
         /**
@@ -159,7 +160,7 @@ private ApiKey(
     }

     public ApiKey(StreamInput in) throws IOException {
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_5_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) {
             this.name = in.readOptionalString();
         } else {
             this.name = in.readString();
@@ -178,13 +179,13 @@ public ApiKey(StreamInput in) throws IOException {
         this.invalidated = in.readBoolean();
         this.username = in.readString();
         this.realm = in.readString();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) {
             this.metadata = in.readMap();
         } else {
             this.metadata = Map.of();
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_5_0)) {
-            final List roleDescriptors = in.readOptionalList(RoleDescriptor::new);
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) {
+            final List roleDescriptors = in.readOptionalCollectionAsList(RoleDescriptor::new);
             this.roleDescriptors = roleDescriptors != null ? List.copyOf(roleDescriptors) : null;
             this.limitedBy = in.readOptionalWriteable(RoleDescriptorsIntersection::new);
         } else {
@@ -308,7 +309,7 @@ private void buildXContentForCrossClusterApiKeyAccess(XContentBuilder builder, R

     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_5_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) {
             out.writeOptionalString(name);
         } else {
             out.writeString(name);
@@ -322,10 +323,10 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeBoolean(invalidated);
         out.writeString(username);
         out.writeString(realm);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) {
             out.writeGenericMap(metadata);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_5_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) {
             out.writeOptionalCollection(roleDescriptors);
             out.writeOptionalWriteable(limitedBy);
         }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseBulkUpdateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseBulkUpdateApiKeyRequest.java
index 43b83fb9c0d31..81c8479c47285 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseBulkUpdateApiKeyRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseBulkUpdateApiKeyRequest.java
@@ -35,7 +35,7 @@ public BaseBulkUpdateApiKeyRequest(

     public BaseBulkUpdateApiKeyRequest(StreamInput in) throws IOException {
         super(in);
-        this.ids = in.readStringList();
+        this.ids = in.readStringCollectionAsList();
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseUpdateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseUpdateApiKeyRequest.java
index c4f42107e0279..b06f8868c53d1 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseUpdateApiKeyRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BaseUpdateApiKeyRequest.java
@@ -36,7 +36,7 @@ public BaseUpdateApiKeyRequest(@Nullable final List roleDescript

     public BaseUpdateApiKeyRequest(StreamInput in) throws IOException {
         super(in);
-        this.roleDescriptors = in.readOptionalList(RoleDescriptor::new);
+        this.roleDescriptors = in.readOptionalCollectionAsList(RoleDescriptor::new);
         this.metadata = in.readMap();
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyResponse.java
index c6d2c07f70127..6b1d79e9404a1 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/BulkUpdateApiKeyResponse.java
@@ -35,8 +35,8 @@ public BulkUpdateApiKeyResponse(final List updated, final List n

     public BulkUpdateApiKeyResponse(StreamInput in) throws IOException {
         super(in);
-        this.updated = in.readStringList();
-        this.noops = in.readStringList();
+        this.updated = in.readStringCollectionAsList();
+        this.noops = in.readStringCollectionAsList();
         this.errorDetails = in.readMap(StreamInput::readException);
     }
@@ -67,7 +67,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     public void writeTo(StreamOutput out) throws IOException {
         out.writeStringCollection(updated);
         out.writeStringCollection(noops);
-        out.writeMap(errorDetails, StreamOutput::writeString, StreamOutput::writeException);
+        out.writeMap(errorDetails, StreamOutput::writeException);
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequest.java
index 7ea53c9f11fc8..e49ba19631a7e 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequest.java
@@ -7,7 +7,7 @@

 package org.elasticsearch.xpack.core.security.action.apikey;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.support.WriteRequest;
 import org.elasticsearch.common.UUIDs;
@@ -57,15 +57,15 @@ public CreateApiKeyRequest(

     public CreateApiKeyRequest(StreamInput in) throws IOException {
         super(in);
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_5_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) {
             this.name = in.readOptionalString();
         } else {
             this.name = in.readString();
         }
         this.expiration = in.readOptionalTimeValue();
-        this.roleDescriptors = in.readImmutableList(RoleDescriptor::new);
+        this.roleDescriptors = in.readCollectionAsImmutableList(RoleDescriptor::new);
         this.refreshPolicy = WriteRequest.RefreshPolicy.readFrom(in);
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) {
             this.metadata = in.readMap();
         } else {
             this.metadata = null;
@@ -74,7 +74,7 @@ public CreateApiKeyRequest(StreamInput in) throws IOException {

     @Override
     protected String doReadId(StreamInput in) throws IOException {
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) {
             return in.readString();
         } else {
             return UUIDs.base64UUID();
@@ -118,18 +118,18 @@ public ActionRequestValidationException validate() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) {
             out.writeString(id);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_5_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) {
             out.writeOptionalString(name);
         } else {
             out.writeString(name);
         }
         out.writeOptionalTimeValue(expiration);
-        out.writeList(getRoleDescriptors());
+        out.writeCollection(getRoleDescriptors());
         refreshPolicy.writeTo(out);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_13_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_13_0)) {
             out.writeGenericMap(metadata);
         }
     }
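A recurring simplification in the API-key hunks above: for maps keyed by String, writeMap now takes only a value writer, and readMap only a value reader, with the String key handling implied. A sketch under those assumptions; the field mirrors the errorDetails map seen above, the class name is hypothetical.

// Hypothetical holder for a Map keyed by String after the writeMap/readMap trimming.
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;
import java.util.Map;

public class ErrorDetailsExample implements Writeable {
    private final Map<String, Exception> errorDetails;

    public ErrorDetailsExample(StreamInput in) throws IOException {
        // Formerly: in.readMap(StreamInput::readString, StreamInput::readException)
        this.errorDetails = in.readMap(StreamInput::readException);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // Formerly: out.writeMap(errorDetails, StreamOutput::writeString, StreamOutput::writeException)
        out.writeMap(errorDetails, StreamOutput::writeException);
    }
}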
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateCrossClusterApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateCrossClusterApiKeyRequest.java
index 2b993155b4fc4..46e19d8af6f74 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateCrossClusterApiKeyRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/CreateCrossClusterApiKeyRequest.java
@@ -41,7 +41,7 @@ public CreateCrossClusterApiKeyRequest(StreamInput in) throws IOException {
         super(in);
         this.name = in.readString();
         this.expiration = in.readOptionalTimeValue();
-        this.roleDescriptors = in.readImmutableList(RoleDescriptor::new);
+        this.roleDescriptors = in.readCollectionAsImmutableList(RoleDescriptor::new);
         this.refreshPolicy = WriteRequest.RefreshPolicy.readFrom(in);
         this.metadata = in.readMap();
     }
@@ -73,7 +73,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeString(id);
         out.writeString(name);
         out.writeOptionalTimeValue(expiration);
-        out.writeList(roleDescriptors);
+        out.writeCollection(roleDescriptors);
         refreshPolicy.writeTo(out);
         out.writeGenericMap(metadata);
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java
index ebf36bdcdc421..d76696dc4fe99 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequest.java
@@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.security.action.apikey;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.common.Strings;
@@ -25,7 +26,7 @@ */
 public final class GetApiKeyRequest extends ActionRequest {

-    static TransportVersion API_KEY_ACTIVE_ONLY_PARAM_TRANSPORT_VERSION = TransportVersion.V_8_500_054;
+    static TransportVersion API_KEY_ACTIVE_ONLY_PARAM_TRANSPORT_VERSION = TransportVersions.V_8_500_054;

     private final String realmName;
     private final String userName;
@@ -41,12 +42,12 @@ public GetApiKeyRequest(StreamInput in) throws IOException {
         userName = textOrNull(in.readOptionalString());
         apiKeyId = textOrNull(in.readOptionalString());
         apiKeyName = textOrNull(in.readOptionalString());
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_4_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_4_0)) {
             ownedByAuthenticatedUser = in.readOptionalBoolean();
         } else {
             ownedByAuthenticatedUser = false;
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_5_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) {
             withLimitedBy = in.readBoolean();
         } else {
             withLimitedBy = false;
@@ -140,10 +141,10 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalString(userName);
         out.writeOptionalString(apiKeyId);
         out.writeOptionalString(apiKeyName);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_4_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_4_0)) {
             out.writeOptionalBoolean(ownedByAuthenticatedUser);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_5_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) {
             out.writeBoolean(withLimitedBy);
         }
         if (out.getTransportVersion().onOrAfter(API_KEY_ACTIVE_ONLY_PARAM_TRANSPORT_VERSION)) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyRequest.java
index 2c43767af7b92..bb5b364005395 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyRequest.java
@@ -7,7 +7,7 @@

 package org.elasticsearch.xpack.core.security.action.apikey;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.common.Strings;
@@ -41,7 +41,7 @@ public InvalidateApiKeyRequest(StreamInput in) throws IOException {
         super(in);
         realmName = textOrNull(in.readOptionalString());
         userName = textOrNull(in.readOptionalString());
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) {
             ids = in.readOptionalStringArray();
         } else {
             final String id = in.readOptionalString();
@@ -49,7 +49,7 @@ public InvalidateApiKeyRequest(StreamInput in) throws IOException {
         }
         validateIds(ids);
         name = textOrNull(in.readOptionalString());
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_4_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_4_0)) {
             ownedByAuthenticatedUser = in.readOptionalBoolean();
         } else {
             ownedByAuthenticatedUser = false;
@@ -209,7 +209,7 @@ public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeOptionalString(realmName);
         out.writeOptionalString(userName);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) {
             out.writeOptionalStringArray(ids);
         } else {
             if (ids != null) {
@@ -223,7 +223,7 @@ public void writeTo(StreamOutput out) throws IOException {
             }
         }
         out.writeOptionalString(name);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_4_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_4_0)) {
             out.writeOptionalBoolean(ownedByAuthenticatedUser);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyResponse.java
index 1568f7037637b..7101d5803a105 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyResponse.java
@@ -45,9 +45,9 @@ public final class InvalidateApiKeyResponse extends ActionResponse implements To

     public InvalidateApiKeyResponse(StreamInput in) throws IOException {
         super(in);
-        this.invalidatedApiKeys = in.readList(StreamInput::readString);
-        this.previouslyInvalidatedApiKeys = in.readList(StreamInput::readString);
-        this.errors = in.readList(StreamInput::readException);
+        this.invalidatedApiKeys = in.readCollectionAsList(StreamInput::readString);
+        this.previouslyInvalidatedApiKeys = in.readCollectionAsList(StreamInput::readString);
+        this.errors = in.readCollectionAsList(StreamInput::readException);
     }

     /**
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyRequest.java
index e315d81975d62..e7eefaeb3a525 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/apikey/QueryApiKeyRequest.java
@@ -7,7 +7,7 @@

 package org.elasticsearch.xpack.core.security.action.apikey;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -67,12 +67,12 @@ public QueryApiKeyRequest(StreamInput in) throws IOException {
         this.from = in.readOptionalVInt();
         this.size = in.readOptionalVInt();
         if (in.readBoolean()) {
-            this.fieldSortBuilders = in.readList(FieldSortBuilder::new);
+            this.fieldSortBuilders = in.readCollectionAsList(FieldSortBuilder::new);
         } else {
             this.fieldSortBuilders = null;
         }
         this.searchAfterBuilder = in.readOptionalWriteable(SearchAfterBuilder::new);
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_5_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) {
             this.withLimitedBy = in.readBoolean();
         } else {
             this.withLimitedBy = false;
@@ -133,10 +133,10 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeBoolean(false);
         } else {
             out.writeBoolean(true);
-            out.writeList(fieldSortBuilders);
+            out.writeCollection(fieldSortBuilders);
         }
         out.writeOptionalWriteable(searchAfterBuilder);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_5_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) {
             out.writeBoolean(withLimitedBy);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/enrollment/NodeEnrollmentResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/enrollment/NodeEnrollmentResponse.java
index 40545d39e60d1..07e1b11abd421 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/enrollment/NodeEnrollmentResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/enrollment/NodeEnrollmentResponse.java
@@ -42,7 +42,7 @@ public NodeEnrollmentResponse(StreamInput in) throws IOException {
         transportCaCert = in.readString();
         transportKey = in.readString();
         transportCert = in.readString();
-        nodesAddresses = in.readStringList();
+        nodesAddresses = in.readStringCollectionAsList();
     }

     public NodeEnrollmentResponse(
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequest.java
index 022c5bc943c21..3f1160df95e22 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateRequest.java
@@ -6,7 +6,7 @@ */
 package org.elasticsearch.xpack.core.security.action.oidc;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.common.Strings;
@@ -54,7 +54,7 @@ public OpenIdConnectAuthenticateRequest(StreamInput in) throws IOException {
         redirectUri = in.readString();
         state = in.readString();
         nonce = in.readString();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_4_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_4_0)) {
             realm = in.readOptionalString();
         }
@@ -113,7 +113,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeString(redirectUri);
         out.writeString(state);
         out.writeString(nonce);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_4_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_4_0)) {
             out.writeOptionalString(realm);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateResponse.java
index 29fcba5be85f8..936a2892a6dbe 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectAuthenticateResponse.java
@@ -6,7 +6,7 @@ */
 package org.elasticsearch.xpack.core.security.action.oidc;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -42,7 +42,7 @@ public OpenIdConnectAuthenticateResponse(StreamInput in) throws IOException {
         accessTokenString = in.readString();
         refreshTokenString = in.readString();
         expiresIn = in.readTimeValue();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) {
             authentication = new Authentication(in);
         }
     }
@@ -73,7 +73,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeString(accessTokenString);
         out.writeString(refreshTokenString);
         out.writeTimeValue(expiresIn);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) {
             authentication.writeTo(out);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationResponse.java
index 00cb814b7e1d7..88d8de80fe7a1 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/oidc/OpenIdConnectPrepareAuthenticationResponse.java
@@ -6,7 +6,7 @@ */
 package org.elasticsearch.xpack.core.security.action.oidc;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -47,7 +47,7 @@ public OpenIdConnectPrepareAuthenticationResponse(StreamInput in) throws IOExcep
         authenticationRequestUrl = in.readString();
         state = in.readString();
         nonce = in.readString();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) {
             realmName = in.readString();
         }
     }
@@ -73,7 +73,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeString(authenticationRequestUrl);
         out.writeString(state);
         out.writeString(nonce);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) {
             out.writeString(realmName);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/ClearPrivilegesCacheResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/ClearPrivilegesCacheResponse.java
index 9bb11ac8e6893..cb56a2f6fda56 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/ClearPrivilegesCacheResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/ClearPrivilegesCacheResponse.java
@@ -32,12 +32,12 @@ public ClearPrivilegesCacheResponse(ClusterName clusterName, List nodes, L
     @Override
     protected List readNodesFrom(StreamInput in) throws IOException {
-        return in.readList(Node::new);
+        return in.readCollectionAsList(Node::new);
     }

     @Override
     protected void writeNodesTo(StreamOutput out, List nodes) throws IOException {
-        out.writeList(nodes);
+        out.writeCollection(nodes);
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesResponse.java
index 5844533a9f91b..f424dd5f4ed39 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/DeletePrivilegesResponse.java
@@ -26,7 +26,7 @@ public final class DeletePrivilegesResponse extends ActionResponse implements To

     public DeletePrivilegesResponse(StreamInput in) throws IOException {
         super(in);
-        this.found = in.readImmutableSet(StreamInput::readString);
+        this.found = in.readCollectionAsImmutableSet(StreamInput::readString);
     }

     public DeletePrivilegesResponse(Collection found) {
@@ -45,7 +45,7 @@ public Set found() {

     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeCollection(found, StreamOutput::writeString);
+        out.writeStringCollection(found);
     }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequest.java
index 77d4ac677e17e..e1e9b72364301 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesRequest.java
@@ -34,7 +34,7 @@ public final class PutPrivilegesRequest extends ActionRequest implements Applica

     public PutPrivilegesRequest(StreamInput in) throws IOException {
         super(in);
-        privileges = in.readImmutableList(ApplicationPrivilegeDescriptor::new);
+        privileges = in.readCollectionAsImmutableList(ApplicationPrivilegeDescriptor::new);
         refreshPolicy = RefreshPolicy.readFrom(in);
     }
@@ -127,7 +127,7 @@ public String toString() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        out.writeList(privileges);
+        out.writeCollection(privileges);
         refreshPolicy.writeTo(out);
     }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesResponse.java
index 6a5b9646dc0f8..2a0120d2ca8fd 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/privilege/PutPrivilegesResponse.java
@@ -27,7 +27,7 @@ public final class PutPrivilegesResponse extends ActionResponse implements ToXCo

     public PutPrivilegesResponse(StreamInput in) throws IOException {
         super(in);
-        this.created = in.readImmutableMap(StreamInput::readStringList);
+        this.created = in.readImmutableMap(StreamInput::readStringCollectionAsList);
     }

     public PutPrivilegesResponse(Map> created) {
@@ -50,7 +50,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws

     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeMap(created, StreamOutput::writeString, StreamOutput::writeStringCollection);
+        out.writeMap(created, StreamOutput::writeStringCollection);
     }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesRequest.java
index 8246c71ff2734..7a7d969ddb14d 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesRequest.java
@@ -32,8 +32,8 @@ public GetProfilesRequest(List uids, Set dataKeys) {

     public GetProfilesRequest(StreamInput in) throws IOException {
         super(in);
-        this.uids = in.readStringList();
-        this.dataKeys = in.readSet(StreamInput::readString);
+        this.uids = in.readStringCollectionAsList();
+        this.dataKeys = in.readCollectionAsSet(StreamInput::readString);
     }

     public GetProfilesRequest(String uid, Set dataKeys) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesResponse.java
index 08239f2a1d4b9..2dbf6743a5fde 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/GetProfilesResponse.java
@@ -31,7 +31,7 @@ public GetProfilesResponse(List profiles, Map errors

     public GetProfilesResponse(StreamInput in) throws IOException {
         super(in);
-        this.profiles = in.readImmutableList(Profile::new);
+        this.profiles = in.readCollectionAsImmutableList(Profile::new);
         this.errors = in.readMap(StreamInput::readException);
     }
@@ -45,8 +45,8 @@ public Map getErrors() {

     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeList(profiles);
-        out.writeMap(errors, StreamOutput::writeString, StreamOutput::writeException);
+        out.writeCollection(profiles);
+        out.writeMap(errors, StreamOutput::writeException);
     }

     @Override
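PutPrivilegesResponse applies the same trimming to a Map of String to List of String: the value side pairs readStringCollectionAsList with writeStringCollection, and the String keys are implied. A sketch under those assumptions; only the field name comes from the response above, the class name is hypothetical.

// Hypothetical holder for a Map<String, List<String>> round-trip.
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;
import java.util.List;
import java.util.Map;

public class CreatedPrivilegesExample implements Writeable {
    private final Map<String, List<String>> created;

    public CreatedPrivilegesExample(StreamInput in) throws IOException {
        // Formerly: in.readImmutableMap(StreamInput::readStringList)
        this.created = in.readImmutableMap(StreamInput::readStringCollectionAsList);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        // Formerly: out.writeMap(created, StreamOutput::writeString, StreamOutput::writeStringCollection)
        out.writeMap(created, StreamOutput::writeStringCollection);
    }
}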
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/Profile.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/Profile.java
index 7c347752904ae..1ce0540946edd 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/Profile.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/Profile.java
@@ -43,7 +43,7 @@ public record ProfileUser(
         public ProfileUser(StreamInput in) throws IOException {
             this(
                 in.readString(),
-                in.readStringList(),
+                in.readStringCollectionAsList(),
                 in.readString(),
                 in.readOptionalString(),
                 in.readOptionalString(),
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/SuggestProfilesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/SuggestProfilesRequest.java
index dddcef16a455f..5b92996dafc3a 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/SuggestProfilesRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/profile/SuggestProfilesRequest.java
@@ -50,7 +50,7 @@ public SuggestProfilesRequest(Set dataKeys, String name, int size, Hint

     public SuggestProfilesRequest(StreamInput in) throws IOException {
         super(in);
-        this.dataKeys = in.readSet(StreamInput::readString);
+        this.dataKeys = in.readCollectionAsSet(StreamInput::readString);
         this.name = in.readOptionalString();
         this.size = in.readVInt();
         this.hint = in.readOptionalWriteable(Hint::new);
@@ -139,7 +139,7 @@ public Hint(List uids, Map labelsInput) {
         }

         public Hint(StreamInput in) throws IOException {
-            this.uids = in.readStringList();
+            this.uids = in.readStringCollectionAsList();
             this.labels = in.readMapOfLists(StreamInput::readString);
         }
@@ -161,7 +161,7 @@ public Tuple> getSingleLabel() {
         @Override
         public void writeTo(StreamOutput out) throws IOException {
             out.writeStringCollection(uids);
-            out.writeMapOfLists(labels, StreamOutput::writeString, StreamOutput::writeString);
+            out.writeMap(labels, StreamOutput::writeStringCollection);
         }

         private ActionRequestValidationException validate(ActionRequestValidationException validationException) {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheResponse.java
index 3ab93fc85ffbd..51c1746792067 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/realm/ClearRealmCacheResponse.java
@@ -32,12 +32,12 @@ public ClearRealmCacheResponse(ClusterName clusterName, List nodes, List
     @Override
     protected List readNodesFrom(StreamInput in) throws IOException {
-        return in.readList(Node::new);
+        return in.readCollectionAsList(Node::new);
     }

     @Override
     protected void writeNodesTo(StreamOutput out, List nodes) throws IOException {
-        out.writeList(nodes);
+        out.writeCollection(nodes);
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheResponse.java
index c4310663d3a3f..6b09ebffa1f21 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/ClearRolesCacheResponse.java
@@ -35,12 +35,12 @@ public ClearRolesCacheResponse(ClusterName clusterName, List nodes, List
     @Override
     protected List readNodesFrom(StreamInput in) throws IOException {
-        return in.readList(Node::new);
+        return in.readCollectionAsList(Node::new);
     }

     @Override
     protected void writeNodesTo(StreamOutput out, List nodes) throws IOException {
-        out.writeList(nodes);
+        out.writeCollection(nodes);
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java
index bda2542c720d2..e82a77f311de6 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequest.java
@@ -6,7 +6,7 @@ */
 package org.elasticsearch.xpack.core.security.action.role;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.support.WriteRequest;
@@ -54,13 +54,13 @@ public PutRoleRequest(StreamInput in) throws IOException {
         for (int i = 0; i < indicesSize; i++) {
             indicesPrivileges.add(new RoleDescriptor.IndicesPrivileges(in));
         }
-        applicationPrivileges = in.readList(RoleDescriptor.ApplicationResourcePrivileges::new);
+        applicationPrivileges = in.readCollectionAsList(RoleDescriptor.ApplicationResourcePrivileges::new);
         configurableClusterPrivileges = ConfigurableClusterPrivileges.readArray(in);
         runAs = in.readStringArray();
         refreshPolicy = RefreshPolicy.readFrom(in);
         metadata = in.readMap();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) {
-            remoteIndicesPrivileges = in.readList(RoleDescriptor.RemoteIndicesPrivileges::new);
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
+            remoteIndicesPrivileges = in.readCollectionAsList(RoleDescriptor.RemoteIndicesPrivileges::new);
         }
     }
@@ -209,17 +209,17 @@ public void writeTo(StreamOutput out) throws IOException {
         for (RoleDescriptor.IndicesPrivileges index : indicesPrivileges) {
             index.writeTo(out);
         }
-        out.writeList(applicationPrivileges);
+        out.writeCollection(applicationPrivileges);
         ConfigurableClusterPrivileges.writeArray(out, this.configurableClusterPrivileges);
         out.writeStringArray(runAs);
         refreshPolicy.writeTo(out);
         out.writeGenericMap(metadata);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
             out.writeCollection(remoteIndicesPrivileges);
         } else if (hasRemoteIndicesPrivileges()) {
             throw new IllegalArgumentException(
                 "versions of Elasticsearch before ["
-                    + TransportVersion.V_8_8_0
+                    + TransportVersions.V_8_8_0
                     + "] can't handle remote indices privileges and attempted to send to ["
                     + out.getTransportVersion()
                     + "]"
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java
index 89b092b0ea1a2..8059a30b88952 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/rolemapping/PutRoleMappingRequest.java
@@ -6,7 +6,7 @@ */
 package org.elasticsearch.xpack.core.security.action.rolemapping;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.support.WriteRequest;
@@ -46,9 +46,9 @@ public PutRoleMappingRequest(StreamInput in) throws IOException {
         super(in);
         this.name = in.readString();
         this.enabled = in.readBoolean();
-        this.roles = in.readStringList();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_2_0)) {
-            this.roleTemplates = in.readList(TemplateRoleName::new);
+        this.roles = in.readStringCollectionAsList();
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) {
+            this.roleTemplates = in.readCollectionAsList(TemplateRoleName::new);
         }
         this.rules = ExpressionParser.readExpression(in);
         this.metadata = in.readMap();
@@ -155,8 +155,8 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeString(name);
         out.writeBoolean(enabled);
         out.writeStringCollection(roles);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_2_0)) {
-            out.writeList(roleTemplates);
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) {
+            out.writeCollection(roleTemplates);
         }
         ExpressionParser.writeExpression(rules, out);
         out.writeGenericMap(metadata);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateResponse.java
index 707c9f36729a8..2cb0a76c2d6bf 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlAuthenticateResponse.java
@@ -6,7 +6,7 @@ */
 package org.elasticsearch.xpack.core.security.action.saml;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -31,13 +31,13 @@ public final class SamlAuthenticateResponse extends ActionResponse {
     public SamlAuthenticateResponse(StreamInput in) throws IOException {
         super(in);
         principal = in.readString();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) {
             realm = in.readString();
         }
         tokenString = in.readString();
         refreshToken = in.readString();
         expiresIn = in.readTimeValue();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) {
             authentication = new Authentication(in);
         }
     }
@@ -78,13 +78,13 @@ public Authentication getAuthentication() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(principal);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) {
             out.writeString(realm);
         }
         out.writeString(tokenString);
         out.writeString(refreshToken);
         out.writeTimeValue(expiresIn);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) {
             authentication.writeTo(out);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationRequest.java
index 02d55ca18beed..3b5ddb21be91c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/saml/SamlPrepareAuthenticationRequest.java
@@ -6,7 +6,7 @@ */
 package org.elasticsearch.xpack.core.security.action.saml;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -33,7 +33,7 @@ public SamlPrepareAuthenticationRequest(StreamInput in) throws IOException {
         super(in);
         realmName = in.readOptionalString();
         assertionConsumerServiceURL = in.readOptionalString();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_5_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) {
             relayState = in.readOptionalString();
         }
     }
@@ -87,7 +87,7 @@ public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeOptionalString(realmName);
         out.writeOptionalString(assertionConsumerServiceURL);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_5_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_5_0)) {
             out.writeOptionalString(relayState);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/service/GetServiceAccountCredentialsNodesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/service/GetServiceAccountCredentialsNodesResponse.java
index 34299c85a29fb..8a16df024bde5 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/service/GetServiceAccountCredentialsNodesResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/service/GetServiceAccountCredentialsNodesResponse.java
@@ -45,12 +45,12 @@ public GetServiceAccountCredentialsNodesResponse(StreamInput in) throws IOExcept
     @Override
     protected List readNodesFrom(StreamInput in) throws IOException {
-        return in.readList(GetServiceAccountCredentialsNodesResponse.Node::new);
+        return in.readCollectionAsList(GetServiceAccountCredentialsNodesResponse.Node::new);
     }

     @Override
     protected void writeNodesTo(StreamOutput out, List nodes) throws IOException {
-        out.writeList(nodes);
+        out.writeCollection(nodes);
     }

     public List getFileTokenInfos() {
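PutRoleRequest above pairs the version gate with a fail-fast guard: rather than silently dropping remote indices privileges when talking to an old node, it throws. A sketch of that pattern; the types and names here are illustrative, not the PR's classes.

// Hypothetical request sketching the gate-plus-guard write pattern.
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;
import java.util.List;

public class GuardedWriteExample implements Writeable {

    public static class RemotePrivilege implements Writeable {
        final String pattern;

        RemotePrivilege(StreamInput in) throws IOException {
            this.pattern = in.readString();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(pattern);
        }
    }

    private final List<RemotePrivilege> remoteIndicesPrivileges;

    public GuardedWriteExample(StreamInput in) throws IOException {
        this.remoteIndicesPrivileges = in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)
            ? in.readCollectionAsList(RemotePrivilege::new)
            : List.of();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
            out.writeCollection(remoteIndicesPrivileges);
        } else if (remoteIndicesPrivileges.isEmpty() == false) {
            // Refuse rather than silently drop data the old node cannot represent.
            throw new IllegalArgumentException(
                "versions of Elasticsearch before ["
                    + TransportVersions.V_8_8_0
                    + "] can't handle remote indices privileges and attempted to send to ["
                    + out.getTransportVersion()
                    + "]"
            );
        }
    }
}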
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/service/GetServiceAccountCredentialsResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/service/GetServiceAccountCredentialsResponse.java
index 1d2e8f7462137..017a74778ee3c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/service/GetServiceAccountCredentialsResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/service/GetServiceAccountCredentialsResponse.java
@@ -37,7 +37,7 @@ public GetServiceAccountCredentialsResponse(
     public GetServiceAccountCredentialsResponse(StreamInput in) throws IOException {
         super(in);
         this.principal = in.readString();
-        this.indexTokenInfos = in.readList(TokenInfo::new);
+        this.indexTokenInfos = in.readCollectionAsList(TokenInfo::new);
         this.nodesResponse = new GetServiceAccountCredentialsNodesResponse(in);
     }
@@ -56,7 +56,7 @@ public GetServiceAccountCredentialsNodesResponse getNodesResponse() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(principal);
-        out.writeList(indexTokenInfos);
+        out.writeCollection(indexTokenInfos);
         nodesResponse.writeTo(out);
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/service/TokenInfo.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/service/TokenInfo.java
index 44d26f0e315e1..8582919de24ad 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/service/TokenInfo.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/service/TokenInfo.java
@@ -36,7 +36,7 @@ private TokenInfo(String name, Collection nodeNames) {

     public TokenInfo(StreamInput in) throws IOException {
         this.name = in.readString();
-        this.nodeNames = in.readOptionalStringList();
+        this.nodeNames = in.readOptionalStringCollectionAsList();
     }

     public String getName() {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java
index c5e06ce5d1843..73719c7cae489 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/token/CreateTokenResponse.java
@@ -6,7 +6,7 @@ */
 package org.elasticsearch.xpack.core.security.action.token;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -41,7 +41,7 @@ public CreateTokenResponse(StreamInput in) throws IOException {
         scope = in.readOptionalString();
         refreshToken = in.readOptionalString();
         kerberosAuthenticationResponseToken = in.readOptionalString();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) {
             authentication = new Authentication(in);
         }
     }
@@ -93,7 +93,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeOptionalString(scope);
         out.writeOptionalString(refreshToken);
         out.writeOptionalString(kerberosAuthenticationResponseToken);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) {
             authentication.writeTo(out);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequest.java
index 73e334f2a2495..78533dd93564f 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateRequest.java
@@ -6,7 +6,7 @@ */
 package org.elasticsearch.xpack.core.security.action.user;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -20,7 +20,7 @@ public class AuthenticateRequest extends ActionRequest {

     public AuthenticateRequest(StreamInput in) throws IOException {
         super(in);
-        if (in.getTransportVersion().before(TransportVersion.V_8_5_0)) {
+        if (in.getTransportVersion().before(TransportVersions.V_8_5_0)) {
             // Older versions included the username as a field
             in.readString();
         }
@@ -37,7 +37,7 @@ public ActionRequestValidationException validate() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        if (out.getTransportVersion().before(TransportVersion.V_8_5_0)) {
+        if (out.getTransportVersion().before(TransportVersions.V_8_5_0)) {
             throw new IllegalStateException("cannot send authenticate request to a node of earlier version");
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java
index 127a84b103ecf..70ffdb8bb9c9c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/AuthenticateResponse.java
@@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.security.action.user;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -19,7 +20,7 @@ public class AuthenticateResponse extends ActionResponse implements ToXContent {

-    public static final TransportVersion VERSION_OPERATOR_FIELD = TransportVersion.V_8_500_028;
+    public static final TransportVersion VERSION_OPERATOR_FIELD = TransportVersions.V_8_500_028;

     private final Authentication authentication;
     private final boolean operator;
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java
index 78dd9441369d3..2cd37df4ef15e 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponse.java
@@ -6,7 +6,7 @@ */
 package org.elasticsearch.xpack.core.security.action.user;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionResponse;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -43,13 +43,13 @@ public final class GetUserPrivilegesResponse extends ActionResponse {

     public GetUserPrivilegesResponse(StreamInput in) throws IOException {
         super(in);
-        cluster = in.readImmutableSet(StreamInput::readString);
-        configurableClusterPrivileges = in.readImmutableSet(ConfigurableClusterPrivileges.READER);
-        index = in.readImmutableSet(Indices::new);
-        application = in.readImmutableSet(RoleDescriptor.ApplicationResourcePrivileges::new);
-        runAs = in.readImmutableSet(StreamInput::readString);
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) {
-            remoteIndex = in.readImmutableSet(RemoteIndices::new);
+        cluster = in.readCollectionAsImmutableSet(StreamInput::readString);
+        configurableClusterPrivileges = in.readCollectionAsImmutableSet(ConfigurableClusterPrivileges.READER);
+        index = in.readCollectionAsImmutableSet(Indices::new);
+        application = in.readCollectionAsImmutableSet(RoleDescriptor.ApplicationResourcePrivileges::new);
+        runAs = in.readCollectionAsImmutableSet(StreamInput::readString);
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
+            remoteIndex = in.readCollectionAsImmutableSet(RemoteIndices::new);
         } else {
             remoteIndex = Set.of();
         }
@@ -101,17 +101,17 @@ public boolean hasRemoteIndicesPrivileges() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeCollection(cluster, StreamOutput::writeString);
+        out.writeStringCollection(cluster);
         out.writeCollection(configurableClusterPrivileges, ConfigurableClusterPrivileges.WRITER);
         out.writeCollection(index);
         out.writeCollection(application);
-        out.writeCollection(runAs, StreamOutput::writeString);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) {
+        out.writeStringCollection(runAs);
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
             out.writeCollection(remoteIndex);
         } else if (hasRemoteIndicesPrivileges()) {
             throw new IllegalArgumentException(
                 "versions of Elasticsearch before ["
-                    + TransportVersion.V_8_8_0
+                    + TransportVersions.V_8_8_0
                     + "] can't handle remote indices privileges and attempted to send to ["
                     + out.getTransportVersion()
                     + "]"
@@ -144,7 +144,7 @@ public int hashCode() {
     public record RemoteIndices(Indices indices, Set remoteClusters) implements ToXContentObject, Writeable {

         public RemoteIndices(StreamInput in) throws IOException {
-            this(new Indices(in), Collections.unmodifiableSet(new TreeSet<>(in.readSet(StreamInput::readString))));
+            this(new Indices(in), Collections.unmodifiableSet(new TreeSet<>(in.readCollectionAsSet(StreamInput::readString))));
         }

         @Override
@@ -190,14 +190,14 @@ public Indices(

         public Indices(StreamInput in) throws IOException {
             // The use of TreeSet is to provide a consistent order that can be relied upon in tests
-            indices = Collections.unmodifiableSet(new TreeSet<>(in.readSet(StreamInput::readString)));
-            privileges = Collections.unmodifiableSet(new TreeSet<>(in.readSet(StreamInput::readString)));
-            fieldSecurity = in.readImmutableSet(input -> {
+            indices = Collections.unmodifiableSet(new TreeSet<>(in.readCollectionAsSet(StreamInput::readString)));
+            privileges = Collections.unmodifiableSet(new TreeSet<>(in.readCollectionAsSet(StreamInput::readString)));
+            fieldSecurity = in.readCollectionAsImmutableSet(input -> {
                 final String[] grant = input.readOptionalStringArray();
                 final String[] exclude = input.readOptionalStringArray();
                 return new FieldPermissionsDefinition.FieldGrantExcludeGroup(grant, exclude);
             });
-            queries = in.readImmutableSet(StreamInput::readBytesReference);
+            queries = in.readCollectionAsImmutableSet(StreamInput::readBytesReference);
             this.allowRestrictedIndices = in.readBoolean();
         }
@@ -305,8 +305,8 @@ private static boolean nonEmpty(String[] grantedFields) {
         @Override
         public void writeTo(StreamOutput out) throws IOException {
-            out.writeCollection(indices, StreamOutput::writeString);
-            out.writeCollection(privileges, StreamOutput::writeString);
+            out.writeStringCollection(indices);
+            out.writeStringCollection(privileges);
             out.writeCollection(fieldSecurity, (output, fields) -> {
                 output.writeOptionalStringArray(fields.getGrantedFields());
                 output.writeOptionalStringArray(fields.getExcludedFields());
out.writeStringCollection(privileges); out.writeCollection(fieldSecurity, (output, fields) -> { output.writeOptionalStringArray(fields.getGrantedFields()); output.writeOptionalStringArray(fields.getExcludedFields()); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersRequest.java index e6e7c0e6b8a8a..29c48f29709fd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersRequest.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersRequest.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.security.action.user; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.Strings; @@ -28,7 +28,7 @@ public class GetUsersRequest extends ActionRequest implements UserRequest { public GetUsersRequest(StreamInput in) throws IOException { super(in); usernames = in.readStringArray(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_5_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) { withProfileUid = in.readBoolean(); } else { withProfileUid = false; @@ -77,7 +77,7 @@ public void setWithProfileUid(boolean withProfileUid) { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeStringArray(usernames); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_5_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) { out.writeBoolean(withProfileUid); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java index 6095a05cd5336..6395d2a090afa 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/GetUsersResponse.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.security.action.user; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -43,7 +43,7 @@ public GetUsersResponse(StreamInput in) throws IOException { users[i] = user; } } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_5_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) { if (in.readBoolean()) { profileUidLookup = in.readMap(StreamInput::readString); } else { @@ -83,10 +83,10 @@ public void writeTo(StreamOutput out) throws IOException { Authentication.AuthenticationSerializationHelper.writeUserTo(user, out); } } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_5_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) { if (profileUidLookup != null) { out.writeBoolean(true); - out.writeMap(profileUidLookup, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(profileUidLookup, StreamOutput::writeString); } else { out.writeBoolean(false); } diff --git 
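A note on the map-serialization change above: wherever a map's keys are strings, this patch moves from the three-argument writeMap (explicit key writer) to a two-argument overload that writes string keys implicitly, and readMap mirrors it on the read side. A minimal round-trip sketch of the idiom — the class name and map contents are hypothetical, and it assumes the Elasticsearch core library on the classpath:

```java
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.Map;

public class WriteMapExample {
    public static void main(String[] args) throws IOException {
        Map<String, String> profileUidLookup = Map.of("jdoe", "u_profile_1");
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            // Two-argument overload: string keys are written implicitly.
            out.writeMap(profileUidLookup, StreamOutput::writeString);
            try (StreamInput in = out.bytes().streamInput()) {
                // Matching one-argument reader: string keys are read implicitly.
                Map<String, String> roundTripped = in.readMap(StreamInput::readString);
                assert roundTripped.equals(profileUidLookup);
            }
        }
    }
}
```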
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java
index 073e639715bce..e59f588ffd65c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/HasPrivilegesResponse.java
@@ -128,9 +128,9 @@ private static Set<ResourcePrivileges> readResourcePrivileges(StreamInput in) th
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeBoolean(completeMatch);
-        out.writeMap(cluster, StreamOutput::writeString, StreamOutput::writeBoolean);
+        out.writeMap(cluster, StreamOutput::writeBoolean);
         writeResourcePrivileges(out, index);
-        out.writeMap(application, StreamOutput::writeString, HasPrivilegesResponse::writeResourcePrivileges);
+        out.writeMap(application, HasPrivilegesResponse::writeResourcePrivileges);
         out.writeString(username);
     }
@@ -138,7 +138,7 @@ private static void writeResourcePrivileges(StreamOutput out, Set<ResourcePrivi
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ProfileHasPrivilegesRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ProfileHasPrivilegesRequest.java
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ProfileHasPrivilegesRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ProfileHasPrivilegesRequest.java
@@ ... @@ public ProfileHasPrivilegesRequest(Set<String> uids, PrivilegesToCheck privileg
     public ProfileHasPrivilegesRequest(StreamInput in) throws IOException {
         super(in);
-        this.uids = in.readStringList();
+        this.uids = in.readStringCollectionAsList();
         this.privilegesToCheck = PrivilegesToCheck.readFrom(in);
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ProfileHasPrivilegesResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ProfileHasPrivilegesResponse.java
index c3e20d4d6e4e9..8e8ff50e5b4ac 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ProfileHasPrivilegesResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/action/user/ProfileHasPrivilegesResponse.java
@@ -26,7 +26,7 @@ public class ProfileHasPrivilegesResponse extends ActionResponse implements ToXC
     public ProfileHasPrivilegesResponse(StreamInput in) throws IOException {
         super(in);
-        this.hasPrivilegeUids = in.readSet(StreamInput::readString);
+        this.hasPrivilegeUids = in.readCollectionAsSet(StreamInput::readString);
         this.errors = in.readMap(StreamInput::readException);
     }
@@ -69,7 +69,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeStringCollection(hasPrivilegeUids);
-        out.writeMap(errors, StreamOutput::writeString, StreamOutput::writeException);
+        out.writeMap(errors, StreamOutput::writeException);
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java
index cd5684709f793..6efb1cfd1fe03 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/Authentication.java
@@ -9,6 +9,7 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -103,9 +104,9 @@ public final class Authentication implements ToXContentObject {
     private static final Logger logger = LogManager.getLogger(Authentication.class);
     private static final TransportVersion VERSION_AUTHENTICATION_TYPE = TransportVersion.fromId(6_07_00_99);
-    public static final TransportVersion VERSION_API_KEY_ROLES_AS_BYTES = TransportVersion.V_7_9_0;
-    public static final TransportVersion VERSION_REALM_DOMAINS = TransportVersion.V_8_2_0;
-    public static final TransportVersion VERSION_METADATA_BEYOND_GENERIC_MAP = TransportVersion.V_8_8_0;
+    public static final TransportVersion VERSION_API_KEY_ROLES_AS_BYTES = TransportVersions.V_7_9_0;
+    public static final TransportVersion VERSION_REALM_DOMAINS = TransportVersions.V_8_2_0;
+    public static final TransportVersion VERSION_METADATA_BEYOND_GENERIC_MAP = TransportVersions.V_8_8_0;
     private final AuthenticationType type;
     private final Subject authenticatingSubject;
     private final Subject effectiveSubject;
@@ -742,7 +743,7 @@ public static Authentication getAuthenticationFromCrossClusterAccessMetadata(Aut
         CROSS_CLUSTER_ACCESS_AUTHENTICATION_KEY,
         Authentication::new,
         CROSS_CLUSTER_ACCESS_ROLE_DESCRIPTORS_KEY,
-        in -> in.readList(RoleDescriptorsBytes::new)
+        in -> in.readCollectionAsList(RoleDescriptorsBytes::new)
     );

     private static Map<String, Object> readMetadata(StreamInput in) throws IOException {
@@ -767,7 +768,7 @@
             (out, v) -> {
                 @SuppressWarnings("unchecked")
                 final List<RoleDescriptorsBytes> roleDescriptorsBytesList = (List<RoleDescriptorsBytes>) v;
-                out.writeCollection(roleDescriptorsBytesList, (o, rdb) -> rdb.writeTo(o));
+                out.writeCollection(roleDescriptorsBytesList);
             }
         );
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/CrossClusterAccessSubjectInfo.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/CrossClusterAccessSubjectInfo.java
index 9f1b6295a012b..b6b54846fc7c4 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/CrossClusterAccessSubjectInfo.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/CrossClusterAccessSubjectInfo.java
@@ -158,7 +158,7 @@ public String encode() throws IOException {
         out.setTransportVersion(authentication.getEffectiveSubject().getTransportVersion());
         TransportVersion.writeVersion(authentication.getEffectiveSubject().getTransportVersion(), out);
         authentication.writeTo(out);
-        out.writeCollection(roleDescriptorsBytesList, (o, rdb) -> rdb.writeTo(o));
+        out.writeCollection(roleDescriptorsBytesList);
         return Base64.getEncoder().encodeToString(BytesReference.toBytes(out.bytes()));
     }
@@ -169,7 +169,7 @@ public static CrossClusterAccessSubjectInfo decode(final String header) throws I
         final TransportVersion version = TransportVersion.readVersion(in);
         in.setTransportVersion(version);
         final Authentication authentication = new Authentication(in);
-        final List<RoleDescriptorsBytes> roleDescriptorsBytesList = in.readImmutableList(RoleDescriptorsBytes::new);
+        final List<RoleDescriptorsBytes> roleDescriptorsBytesList = in.readCollectionAsImmutableList(RoleDescriptorsBytes::new);
         return new CrossClusterAccessSubjectInfo(authentication, roleDescriptorsBytesList);
     }
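The Authentication and CrossClusterAccessSubjectInfo hunks drop the explicit element writer `(o, rdb) -> rdb.writeTo(o)` because writeCollection can delegate to each element's own writeTo when the elements are Writeable. A minimal sketch of that round trip; the Tag record is a hypothetical stand-in for RoleDescriptorsBytes:

```java
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;
import java.util.List;

public class WriteCollectionExample {
    // Hypothetical Writeable payload.
    record Tag(String value) implements Writeable {
        Tag(StreamInput in) throws IOException {
            this(in.readString());
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(value);
        }
    }

    public static void main(String[] args) throws IOException {
        List<Tag> tags = List.of(new Tag("a"), new Tag("b"));
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.writeCollection(tags); // each element serializes itself via writeTo
            try (StreamInput in = out.bytes().streamInput()) {
                List<Tag> back = in.readCollectionAsImmutableList(Tag::new);
                assert back.equals(tags);
            }
        }
    }
}
```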
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmDomain.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmDomain.java
index 53de14b5b68bb..573023739db11 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmDomain.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/RealmDomain.java
@@ -33,7 +33,7 @@ public void writeTo(StreamOutput out) throws IOException {
     static RealmDomain readFrom(StreamInput in) throws IOException {
         String domainName = in.readString();
-        Set<RealmConfig.RealmIdentifier> realms = in.readSet(RealmConfig.RealmIdentifier::new);
+        Set<RealmConfig.RealmIdentifier> realms = in.readCollectionAsSet(RealmConfig.RealmIdentifier::new);
         return new RealmDomain(domainName, realms);
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/TokenMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/TokenMetadata.java
index 153f3be38f605..54668c9dd2df7 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/TokenMetadata.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/TokenMetadata.java
@@ -7,6 +7,7 @@
 package org.elasticsearch.xpack.core.security.authc;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.cluster.AbstractNamedDiffable;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.NamedDiff;
@@ -46,13 +47,13 @@ public TokenMetadata(List<KeyAndTimestamp> keys, byte[] currentKeyHash) {
     public TokenMetadata(StreamInput input) throws IOException {
         currentKeyHash = input.readByteArray();
-        keys = input.readImmutableList(KeyAndTimestamp::new);
+        keys = input.readCollectionAsImmutableList(KeyAndTimestamp::new);
     }

     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeByteArray(currentKeyHash);
-        out.writeList(keys);
+        out.writeCollection(keys);
     }

     public static NamedDiff readDiffFrom(StreamInput in) throws IOException {
@@ -93,7 +94,7 @@ public String toString() {
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.MINIMUM_COMPATIBLE;
+        return TransportVersions.MINIMUM_COMPATIBLE;
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java
index 9f1229cbf9261..8fe018a825468 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/TokensInvalidationResult.java
@@ -8,7 +8,7 @@
 package org.elasticsearch.xpack.core.security.authc.support;

 import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -56,13 +56,13 @@ public TokensInvalidationResult(
     }

     public TokensInvalidationResult(StreamInput in) throws IOException {
-        this.invalidatedTokens = in.readStringList();
-        this.previouslyInvalidatedTokens = in.readStringList();
-        this.errors = in.readList(StreamInput::readException);
-        if (in.getTransportVersion().before(TransportVersion.V_7_2_0)) {
+        this.invalidatedTokens = in.readStringCollectionAsList();
+        this.previouslyInvalidatedTokens = in.readStringCollectionAsList();
+        this.errors = in.readCollectionAsList(StreamInput::readException);
+        if (in.getTransportVersion().before(TransportVersions.V_7_2_0)) {
             in.readVInt();
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) {
             this.restStatus = RestStatus.readFrom(in);
         }
     }
@@ -111,10 +111,10 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeStringCollection(invalidatedTokens);
         out.writeStringCollection(previouslyInvalidatedTokens);
         out.writeCollection(errors, StreamOutput::writeException);
-        if (out.getTransportVersion().before(TransportVersion.V_7_2_0)) {
+        if (out.getTransportVersion().before(TransportVersions.V_7_2_0)) {
             out.writeVInt(5);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) {
             RestStatus.writeTo(out, restStatus);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java
index cbe1e71d8fc05..426e8b0563a90 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/ExpressionRoleMapping.java
@@ -6,7 +6,7 @@
  */
 package org.elasticsearch.xpack.core.security.authc.support.mapper;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
@@ -96,9 +96,9 @@ public ExpressionRoleMapping(
     public ExpressionRoleMapping(StreamInput in) throws IOException {
         this.name = in.readString();
         this.enabled = in.readBoolean();
-        this.roles = in.readStringList();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_2_0)) {
-            this.roleTemplates = in.readList(TemplateRoleName::new);
+        this.roles = in.readStringCollectionAsList();
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) {
+            this.roleTemplates = in.readCollectionAsList(TemplateRoleName::new);
         } else {
             this.roleTemplates = Collections.emptyList();
         }
@@ -111,8 +111,8 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeString(name);
         out.writeBoolean(enabled);
         out.writeStringCollection(roles);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_2_0)) {
-            out.writeList(roleTemplates);
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_2_0)) {
+            out.writeCollection(roleTemplates);
         }
         ExpressionParser.writeExpression(expression, out);
         out.writeGenericMap(metadata);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParser.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParser.java
index 6363400c66f7c..50d15672ae80d 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParser.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/ExpressionParser.java
@@ -36,11 +36,11 @@ public static void writeExpression(RoleMapperExpression expression, StreamOutput
     }

     static List<RoleMapperExpression> readExpressionList(StreamInput in) throws IOException {
-        return in.readNamedWriteableList(RoleMapperExpression.class);
+        return in.readNamedWriteableCollectionAsList(RoleMapperExpression.class);
     }

     static void writeExpressionList(List<RoleMapperExpression> list, StreamOutput out) throws IOException {
-        out.writeNamedWriteableList(list);
+        out.writeNamedWriteableCollection(list);
     }

     /**
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/FieldExpression.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/FieldExpression.java
index 1b640164d54b5..f6b4609b2c61c 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/FieldExpression.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authc/support/mapper/expressiondsl/FieldExpression.java
@@ -45,13 +45,13 @@ public FieldExpression(String field, List<FieldValue> values) {
     }

     public FieldExpression(StreamInput in) throws IOException {
-        this(in.readString(), in.readList(FieldValue::readFrom));
+        this(in.readString(), in.readCollectionAsList(FieldValue::readFrom));
     }

     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(field);
-        out.writeList(values);
+        out.writeCollection(values);
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java
index 977031ebf28d3..6a5df4370bd50 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptor.java
@@ -9,6 +9,7 @@
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.ElasticsearchSecurityException;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.ValidationException;
 import org.elasticsearch.common.bytes.BytesArray;
@@ -53,7 +54,7 @@
  */
 public class RoleDescriptor implements ToXContentObject, Writeable {

-    public static final TransportVersion WORKFLOWS_RESTRICTION_VERSION = TransportVersion.V_8_500_010;
+    public static final TransportVersion WORKFLOWS_RESTRICTION_VERSION = TransportVersions.V_8_500_020;

     public static final String ROLE_TYPE = "role";
@@ -194,7 +195,7 @@ public RoleDescriptor(StreamInput in) throws IOException {
         this.applicationPrivileges = in.readArray(ApplicationResourcePrivileges::new, ApplicationResourcePrivileges[]::new);
         this.configurableClusterPrivileges = ConfigurableClusterPrivileges.readArray(in);
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
             this.remoteIndicesPrivileges = in.readArray(RemoteIndicesPrivileges::new, RemoteIndicesPrivileges[]::new);
         } else {
             this.remoteIndicesPrivileges = RemoteIndicesPrivileges.NONE;
@@ -413,7 +414,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeGenericMap(transientMetadata);
         out.writeArray(ApplicationResourcePrivileges::write, applicationPrivileges);
         ConfigurableClusterPrivileges.writeArray(out, getConditionalClusterPrivileges());
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
             out.writeArray(remoteIndicesPrivileges);
         }
         if (out.getTransportVersion().onOrAfter(WORKFLOWS_RESTRICTION_VERSION)) {
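ExpressionParser above switches to readNamedWriteableCollectionAsList/writeNamedWriteableCollection, which serialize a polymorphic list by writing each element's registered name ahead of its payload. A hedged sketch of how such a list round-trips through a NamedWriteableRegistry; AnyExpression and its registration are hypothetical stand-ins for the RoleMapperExpression hierarchy:

```java
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.NamedWriteable;
import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.List;

public class NamedWriteableListExample {
    // Hypothetical expression type; the registered name selects the reader on deserialization.
    record AnyExpression(String field) implements NamedWriteable {
        static final String NAME = "any";

        AnyExpression(StreamInput in) throws IOException {
            this(in.readString());
        }

        @Override
        public String getWriteableName() {
            return NAME;
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(field);
        }
    }

    public static void main(String[] args) throws IOException {
        NamedWriteableRegistry registry = new NamedWriteableRegistry(
            List.of(new NamedWriteableRegistry.Entry(NamedWriteable.class, AnyExpression.NAME, AnyExpression::new))
        );
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            out.writeNamedWriteableCollection(List.of(new AnyExpression("username")));
            try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), registry)) {
                List<NamedWriteable> back = in.readNamedWriteableCollectionAsList(NamedWriteable.class);
                assert ((AnyExpression) back.get(0)).field().equals("username");
            }
        }
    }
}
```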
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersection.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersection.java
index da7ce1f845739..bdfc87a06c922 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersection.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorsIntersection.java
@@ -31,7 +31,7 @@ public RoleDescriptorsIntersection(RoleDescriptor roleDescriptor) {
     }

     public RoleDescriptorsIntersection(StreamInput in) throws IOException {
-        this(in.readImmutableList(inner -> inner.readSet(RoleDescriptor::new)));
+        this(in.readCollectionAsImmutableList(inner -> inner.readCollectionAsSet(RoleDescriptor::new)));
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeDescriptor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeDescriptor.java
index c722e845ab9df..1cf59710d2476 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeDescriptor.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ApplicationPrivilegeDescriptor.java
@@ -65,7 +65,7 @@ public ApplicationPrivilegeDescriptor(String application, String name, Set
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/privilege/ConfigurableClusterPrivileges.java
@@ ... @@
-        final Set<String> applications = in.readSet(StreamInput::readString);
+        final Set<String> applications = in.readCollectionAsSet(StreamInput::readString);
         return new WriteProfileDataPrivileges(applications);
     }
@@ -297,11 +297,11 @@ public String getWriteableName() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeCollection(this.applicationNames, StreamOutput::writeString);
+        out.writeStringCollection(this.applicationNames);
     }

     public static ManageApplicationPrivileges createFrom(StreamInput in) throws IOException {
-        final Set<String> applications = in.readSet(StreamInput::readString);
+        final Set<String> applications = in.readCollectionAsSet(StreamInput::readString);
         return new ManageApplicationPrivileges(applications);
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/CacheIteratorHelper.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/CacheIteratorHelper.java
index af2fcf041172f..5b372ed91b7bf 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/CacheIteratorHelper.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/support/CacheIteratorHelper.java
@@ -31,8 +31,10 @@ public CacheIteratorHelper(Cache<K, V> cache) {
         final ReadWriteLock lock = new ReentrantReadWriteLock();
         // the lock is used in an odd manner; when iterating over the cache we cannot have modifiers other than deletes using the
         // iterator but when not iterating we can modify the cache without external locking. When making normal modifications to the cache
-        // the read lock is obtained so that we can allow concurrent modifications; however when we need to iterate over the keys or values
-        // of the cache the write lock must obtained to prevent any modifications.
+        // the read lock can be obtained so that we can allow concurrent modifications; however when we need to iterate over the keys or
+        // values of the cache the write lock must be obtained to prevent any modifications.
+        // Note - the write lock is needed for concurrent modifications across Cache#put and Cache#invalidateAll
+        // see https://github.com/elastic/elasticsearch/issues/99326 for additional information
         updateLock = new ReleasableLock(lock.readLock());
         iteratorLock = new ReleasableLock(lock.writeLock());
     }
@@ -41,7 +43,7 @@ public ReleasableLock acquireUpdateLock() {
         return updateLock.acquire();
     }

-    private ReleasableLock acquireForIterator() {
+    public ReleasableLock acquireForIterator() {
         return iteratorLock.acquire();
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java
index f8e23155bcc30..2739f64986439 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/user/InternalUsers.java
@@ -9,10 +9,12 @@
 import org.elasticsearch.action.admin.indices.analyze.ReloadAnalyzerAction;
 import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction;
+import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockAction;
 import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
 import org.elasticsearch.action.admin.indices.rollover.RolloverAction;
 import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
+import org.elasticsearch.action.downsample.DownsampleAction;
 import org.elasticsearch.xpack.core.XPackPlugin;
 import org.elasticsearch.xpack.core.security.authz.RoleDescriptor;
 import org.elasticsearch.xpack.core.security.support.MetadataUtils;
@@ -144,7 +146,9 @@ public class InternalUsers {
                     ForceMergeAction.NAME + "*",
                     // indices stats is used by rollover, so we need to grant it here
                     IndicesStatsAction.NAME + "*",
-                    UpdateSettingsAction.NAME
+                    UpdateSettingsAction.NAME,
+                    DownsampleAction.NAME,
+                    AddIndexBlockAction.NAME
                 )
                 .allowRestrictedIndices(false)
                 .build(),
@@ -160,7 +164,9 @@ public class InternalUsers {
                     RolloverAction.NAME,
                     ForceMergeAction.NAME + "*",
                     // indices stats is used by rollover, so we need to grant it here
-                    IndicesStatsAction.NAME + "*"
+                    IndicesStatsAction.NAME + "*",
+                    UpdateSettingsAction.NAME
+                    // Down-sampling related actions are not granted here because down-sampling is not supported for system data streams
                 )
                 .allowRestrictedIndices(true)
                 .build() },
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SLMFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SLMFeatureSetUsage.java
index ffb564b9e0d5d..289c76714b731 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SLMFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SLMFeatureSetUsage.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.slm;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.core.Nullable;
@@ -40,7 +41,7 @@ public SLMFeatureSetUsage(@Nullable SnapshotLifecycleStats slmStats) {
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_7_5_0;
+        return TransportVersions.V_7_5_0;
     }

     public SnapshotLifecycleStats getStats() {
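The CacheIteratorHelper comment rewritten above describes an intentionally inverted use of a ReadWriteLock: ordinary cache mutations take the read (shared) side so they can proceed concurrently, while the lone iterator takes the write (exclusive) side to fence out all mutation. A JDK-only sketch of the same idea, independent of the Elasticsearch Cache class:

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class InvertedLockExample {
    private final Map<String, String> cache = new ConcurrentHashMap<>();
    private final ReadWriteLock lock = new ReentrantReadWriteLock();

    // Many mutators may hold the "read" side at once: the map itself is
    // thread-safe, so the lock only fences mutation against iteration.
    public void put(String key, String value) {
        lock.readLock().lock();
        try {
            cache.put(key, value);
        } finally {
            lock.readLock().unlock();
        }
    }

    // Iteration takes the "write" side, so it is exclusive: no put can
    // interleave while the entries are walked and selectively removed.
    public void removeMatching(String prefix) {
        lock.writeLock().lock();
        try {
            cache.keySet().removeIf(k -> k.startsWith(prefix));
        } finally {
            lock.writeLock().unlock();
        }
    }
}
```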
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleMetadata.java
index 5f1483e4c6004..9610f70689897 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleMetadata.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleMetadata.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.slm;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.cluster.Diff;
 import org.elasticsearch.cluster.DiffableUtils;
 import org.elasticsearch.cluster.NamedDiff;
@@ -131,12 +132,12 @@ public String getWriteableName() {
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_7_4_0;
+        return TransportVersions.V_7_4_0;
     }

     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeMap(this.snapshotConfigurations, StreamOutput::writeString, (out1, value) -> value.writeTo(out1));
+        out.writeMap(this.snapshotConfigurations, StreamOutput::writeWriteable);
         out.writeEnum(this.operationMode);
         this.slmStats.writeTo(out);
     }
@@ -230,7 +231,7 @@ static Diff<SnapshotLifecyclePolicyMetadata> readLifecyclePolicyDiffFrom(StreamI
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_7_4_0;
+        return TransportVersions.V_7_4_0;
     }
 }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java
index d3bd79c66d797..a317b79901751 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicyMetadata.java
@@ -7,7 +7,7 @@
 package org.elasticsearch.xpack.core.slm;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.cluster.SimpleDiffable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -116,7 +116,7 @@ public static SnapshotLifecyclePolicyMetadata parse(XContentParser parser, Strin
         this.modifiedDate = in.readVLong();
         this.lastSuccess = in.readOptionalWriteable(SnapshotInvocationRecord::new);
         this.lastFailure = in.readOptionalWriteable(SnapshotInvocationRecord::new);
-        this.invocationsSinceLastSuccess = in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0) ? in.readVLong() : 0L;
+        this.invocationsSinceLastSuccess = in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0) ? in.readVLong() : 0L;
     }

     @Override
@@ -127,7 +127,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeVLong(this.modifiedDate);
         out.writeOptionalWriteable(this.lastSuccess);
         out.writeOptionalWriteable(this.lastFailure);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
             out.writeVLong(this.invocationsSinceLastSuccess);
         }
     }
@@ -157,8 +157,8 @@ public SnapshotLifecyclePolicy getPolicy() {
         return policy;
     }

-    public String getName() {
-        return policy.getName();
+    public String getId() {
+        return policy.getId();
     }

     public long getVersion() {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java
index 4370671faf901..7a46220644abd 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleStats.java
@@ -210,7 +210,7 @@ public void snapshotDeleteFailure(String slmPolicy) {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeMap(policyStats, StreamOutput::writeString, (v, o) -> o.writeTo(v));
+        out.writeMap(policyStats, StreamOutput::writeWriteable);
         out.writeVLong(retentionRunCount.count());
         out.writeVLong(retentionFailedCount.count());
         out.writeVLong(retentionTimedOut.count());
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/GetSnapshotLifecycleAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/GetSnapshotLifecycleAction.java
index 0e9fe37127476..1a95d649f2616 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/GetSnapshotLifecycleAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/action/GetSnapshotLifecycleAction.java
@@ -92,7 +92,7 @@ public Response(List<SnapshotLifecyclePolicyItem> lifecycles) {
     }

     public Response(StreamInput in) throws IOException {
-        this.lifecycles = in.readList(SnapshotLifecyclePolicyItem::new);
+        this.lifecycles = in.readCollectionAsList(SnapshotLifecyclePolicyItem::new);
     }

     public List<SnapshotLifecyclePolicyItem> getPolicies() {
@@ -116,7 +116,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        out.writeList(lifecycles);
+        out.writeCollection(lifecycles);
     }

     @Override
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsage.java
index 0c68b5a150998..380c0e97d30dd 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/SpatialFeatureSetUsage.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.spatial;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xpack.core.XPackFeatureSet;
@@ -28,7 +29,7 @@ public SpatialFeatureSetUsage(SpatialStatsAction.Response statsResponse) {
     public SpatialFeatureSetUsage(StreamInput input) throws IOException {
         super(input);
-        if (input.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) {
+        if (input.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) {
             this.statsResponse = new SpatialStatsAction.Response(input);
         } else {
             this.statsResponse = null;
@@ -37,7 +38,7 @@ public SpatialFeatureSetUsage(StreamInput input) throws IOException {
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_7_4_0;
+        return TransportVersions.V_7_4_0;
     }

     SpatialStatsAction.Response statsResponse() {
@@ -47,7 +48,7 @@ SpatialStatsAction.Response statsResponse() {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_11_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_11_0)) {
             this.statsResponse.writeTo(out);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/action/SpatialStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/action/SpatialStatsAction.java
index e9d179c68c378..f1d28f040dbb4 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/action/SpatialStatsAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/spatial/action/SpatialStatsAction.java
@@ -101,12 +101,12 @@ public Response(ClusterName clusterName, List<NodeResponse> nodes, List
         }

         @Override
         protected List<NodeResponse> readNodesFrom(StreamInput in) throws IOException {
-            return in.readList(NodeResponse::new);
+            return in.readCollectionAsList(NodeResponse::new);
         }

         @Override
         protected void writeNodesTo(StreamOutput out, List<NodeResponse> nodes) throws IOException {
-            out.writeList(nodes);
+            out.writeCollection(nodes);
         }

         public EnumCounters getStats() {
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/sql/SqlFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/sql/SqlFeatureSetUsage.java
index 1cc55f267ce0a..fbb8025d0e446 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/sql/SqlFeatureSetUsage.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/sql/SqlFeatureSetUsage.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.core.sql;

 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.xcontent.XContentBuilder;
@@ -33,7 +34,7 @@ public SqlFeatureSetUsage(Map<String, Object> stats) {
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_7_0_0;
+        return TransportVersions.V_7_0_0;
     }

     public Map<String, Object> stats() {
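SpatialFeatureSetUsage above shows the transport-version gate that recurs throughout this patch: a field added to the wire format in some release is only read or written when the peer's negotiated version is new enough, with a fallback otherwise. A condensed sketch of the idiom — the class and field are hypothetical:

```java
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

// Hypothetical wire-format fragment illustrating version-gated fields.
public class VersionGatedField {
    private final String issuer; // assume this field joined the wire format in 8.4

    VersionGatedField(StreamInput in) throws IOException {
        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
            issuer = in.readString(); // new peers send it
        } else {
            issuer = "";              // fall back for older peers
        }
    }

    public void writeTo(StreamOutput out) throws IOException {
        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
            out.writeString(issuer);  // only when the peer understands it
        }
    }
}
```

The read and write sides must gate on the same version constant, or the two nodes disagree about the stream layout and deserialization fails.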
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/cert/CertificateInfo.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/cert/CertificateInfo.java
index 3588d21110bc0..ee077e5140606 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/cert/CertificateInfo.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ssl/cert/CertificateInfo.java
@@ -6,7 +6,7 @@
  */
 package org.elasticsearch.xpack.core.ssl.cert;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -56,7 +56,7 @@ public CertificateInfo(String path, String format, String alias, boolean hasPriv
     }

     public CertificateInfo(StreamInput in) throws IOException {
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) {
             this.path = in.readOptionalString();
         } else {
             this.path = in.readString();
@@ -67,7 +67,7 @@ public CertificateInfo(StreamInput in) throws IOException {
         this.serialNumber = in.readString();
         this.hasPrivateKey = in.readBoolean();
         this.expiry = Instant.ofEpochMilli(in.readLong()).atZone(ZoneOffset.UTC);
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
             this.issuer = in.readString();
         } else {
             this.issuer = "";
@@ -76,7 +76,7 @@ public CertificateInfo(StreamInput in) throws IOException {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) {
             out.writeOptionalString(this.path);
         } else {
             out.writeString(this.path == null ? "" : this.path);
@@ -87,7 +87,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeString(serialNumber);
         out.writeBoolean(hasPrivateKey);
         out.writeLong(expiry.toInstant().toEpochMilli());
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) {
             out.writeString(issuer);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/NodeTermsEnumRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/NodeTermsEnumRequest.java
index 40de316c8c61c..84a28d9c8d1ef 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/NodeTermsEnumRequest.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/NodeTermsEnumRequest.java
@@ -6,7 +6,7 @@
  */
 package org.elasticsearch.xpack.core.termsenum.action;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.OriginalIndices;
 import org.elasticsearch.action.support.IndicesOptions;
@@ -78,7 +78,7 @@ public NodeTermsEnumRequest(StreamInput in) throws IOException {
         for (int i = 0; i < numShards; i++) {
             shardIds.add(new ShardId(in));
         }
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_15_1)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_15_1)) {
             originalIndices = OriginalIndices.readOriginalIndices(in);
         } else {
             String[] indicesNames = shardIds.stream().map(ShardId::getIndexName).distinct().toArray(String[]::new);
@@ -106,7 +106,7 @@ public void writeTo(StreamOutput out) throws IOException {
         for (ShardId shardId : shardIds) {
             shardId.writeTo(out);
         }
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_15_1)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_15_1)) {
             OriginalIndices.writeOriginalIndices(originalIndices, out);
         }
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/NodeTermsEnumResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/NodeTermsEnumResponse.java
index cede7ddeedd70..51c183b8e49c3 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/NodeTermsEnumResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/NodeTermsEnumResponse.java
@@ -7,7 +7,7 @@
 package org.elasticsearch.xpack.core.termsenum.action;

 import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Writeable;
@@ -31,14 +31,14 @@ class NodeTermsEnumResponse extends TransportResponse {
     NodeTermsEnumResponse(StreamInput in) throws IOException {
         super(in);
-        if (in.getTransportVersion().before(TransportVersion.V_8_2_0)) {
-            terms = in.readList(r -> {
+        if (in.getTransportVersion().before(TransportVersions.V_8_2_0)) {
+            terms = in.readCollectionAsList(r -> {
                 String term = r.readString();
                 in.readLong(); // obsolete docCount field
                 return term;
             });
         } else {
-            terms = in.readStringList();
+            terms = in.readStringCollectionAsList();
         }
         error = in.readOptionalString();
         complete = in.readBoolean();
@@ -70,7 +70,7 @@ private NodeTermsEnumResponse(String nodeId, List<String> terms, String error, b
     @Override
     public void writeTo(StreamOutput out) throws IOException {
-        if (out.getTransportVersion().before(TransportVersion.V_8_2_0)) {
+        if (out.getTransportVersion().before(TransportVersions.V_8_2_0)) {
             out.writeCollection(terms.stream().map(term -> (Writeable) out1 -> {
                 out1.writeString(term);
                 out1.writeLong(1); // obsolete docCount field
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TermsEnumResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TermsEnumResponse.java
index c605abbbe62e4..43dc92857551a 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TermsEnumResponse.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/termsenum/action/TermsEnumResponse.java
@@ -59,7 +59,7 @@ public class TermsEnumResponse extends BroadcastResponse {
     TermsEnumResponse(StreamInput in) throws IOException {
         super(in);
-        terms = in.readStringList();
+        terms = in.readStringCollectionAsList();
         complete = in.readBoolean();
     }
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureAction.java
index 76758b36ffc6d..8387511fa50f5 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/action/FindStructureAction.java
@@ -6,7 +6,7 @@
  */
 package org.elasticsearch.xpack.core.textstructure.action;

-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.ActionResponse;
@@ -140,13 +140,13 @@ public Request(StreamInput in) throws IOException {
         timeout = in.readOptionalTimeValue();
         charset = in.readOptionalString();
         format = in.readBoolean() ? in.readEnum(TextStructure.Format.class) : null;
-        columnNames = in.readBoolean() ? in.readStringList() : null;
+        columnNames = in.readBoolean() ? in.readStringCollectionAsList() : null;
         hasHeaderRow = in.readOptionalBoolean();
         delimiter = in.readBoolean() ? (char) in.readVInt() : null;
         quote = in.readBoolean() ? (char) in.readVInt() : null;
         shouldTrimFields = in.readOptionalBoolean();
         grokPattern = in.readOptionalString();
-        if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_5_0)) {
+        if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) {
             ecsCompatibility = in.readOptionalString();
         } else {
             ecsCompatibility = null;
@@ -391,7 +391,7 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeBoolean(false);
         } else {
             out.writeBoolean(true);
-            out.writeCollection(columnNames, StreamOutput::writeString);
+            out.writeStringCollection(columnNames);
         }
         out.writeOptionalBoolean(hasHeaderRow);
         if (delimiter == null) {
@@ -408,7 +408,7 @@ public void writeTo(StreamOutput out) throws IOException {
         }
         out.writeOptionalBoolean(shouldTrimFields);
         out.writeOptionalString(grokPattern);
-        if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_5_0)) {
+        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) {
             out.writeOptionalString(ecsCompatibility);
         }
         out.writeOptionalString(timestampFormat);
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/structurefinder/FieldStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/structurefinder/FieldStats.java
index 607cf38421ebe..815723e40a373 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/structurefinder/FieldStats.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/textstructure/structurefinder/FieldStats.java
@@ -123,7 +123,7 @@ public FieldStats(StreamInput in) throws IOException {
         medianValue = in.readOptionalDouble();
         earliestTimestamp = in.readOptionalString();
         latestTimestamp = in.readOptionalString();
-        topHits = in.readList(StreamInput::readMap);
+        topHits = in.readCollectionAsList(StreamInput::readMap);
     }

     @Override
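FindStructureAction's request above uses the stream protocol's convention for nullable fields: a boolean presence flag, followed by the value only when it is present, and the reader mirrors the same branch. A minimal round-trip sketch (hypothetical values; assumes the Elasticsearch core library on the classpath):

```java
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;

import java.io.IOException;
import java.util.List;

public class OptionalListExample {
    public static void main(String[] args) throws IOException {
        List<String> columnNames = null; // the field may legitimately be absent
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            if (columnNames == null) {
                out.writeBoolean(false);                 // presence flag only
            } else {
                out.writeBoolean(true);                  // flag, then the value
                out.writeStringCollection(columnNames);
            }
            try (StreamInput in = out.bytes().streamInput()) {
                // The reader branches on the same flag, restoring null when absent.
                List<String> back = in.readBoolean() ? in.readStringCollectionAsList() : null;
                assert back == null;
            }
        }
    }
}
```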
(char) in.readVInt() : null; shouldTrimFields = in.readOptionalBoolean(); grokPattern = in.readOptionalString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_5_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) { ecsCompatibility = getNonNullEcsCompatibilityString(in.readString()); } else { ecsCompatibility = getNonNullEcsCompatibilityString(null); } - jodaTimestampFormats = in.readBoolean() ? in.readImmutableList(StreamInput::readString) : null; - javaTimestampFormats = in.readBoolean() ? in.readImmutableList(StreamInput::readString) : null; + jodaTimestampFormats = in.readBoolean() ? in.readCollectionAsImmutableList(StreamInput::readString) : null; + javaTimestampFormats = in.readBoolean() ? in.readCollectionAsImmutableList(StreamInput::readString) : null; timestampField = in.readOptionalString(); needClientTimezone = in.readBoolean(); mappings = Collections.unmodifiableSortedMap(new TreeMap<>(in.readMap())); ingestPipeline = in.readBoolean() ? Collections.unmodifiableMap(in.readMap()) : null; fieldStats = Collections.unmodifiableSortedMap(new TreeMap<>(in.readMap(FieldStats::new))); - explanation = in.readImmutableList(StreamInput::readString); + explanation = in.readCollectionAsImmutableList(StreamInput::readString); } @Override @@ -255,7 +255,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(false); } else { out.writeBoolean(true); - out.writeCollection(columnNames, StreamOutput::writeString); + out.writeStringCollection(columnNames); } out.writeOptionalBoolean(hasHeaderRow); if (delimiter == null) { @@ -272,20 +272,20 @@ public void writeTo(StreamOutput out) throws IOException { } out.writeOptionalBoolean(shouldTrimFields); out.writeOptionalString(grokPattern); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_5_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) { out.writeString(ecsCompatibility); } if (jodaTimestampFormats == null) { out.writeBoolean(false); } else { out.writeBoolean(true); - out.writeCollection(jodaTimestampFormats, StreamOutput::writeString); + out.writeStringCollection(jodaTimestampFormats); } if (javaTimestampFormats == null) { out.writeBoolean(false); } else { out.writeBoolean(true); - out.writeCollection(javaTimestampFormats, StreamOutput::writeString); + out.writeStringCollection(javaTimestampFormats); } out.writeOptionalString(timestampField); out.writeBoolean(needClientTimezone); @@ -296,8 +296,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(true); out.writeGenericMap(ingestPipeline); } - out.writeMap(fieldStats, StreamOutput::writeString, (out1, value) -> value.writeTo(out1)); - out.writeCollection(explanation, StreamOutput::writeString); + out.writeMap(fieldStats, StreamOutput::writeWriteable); + out.writeStringCollection(explanation); } public int getNumLinesAnalyzed() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformFeatureSetUsage.java index 5d58284daa2b6..66c97876c1f6f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformFeatureSetUsage.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.transform; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import 
org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -49,14 +50,14 @@ public TransformFeatureSetUsage( @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_5_0; + return TransportVersions.V_7_5_0; } @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeMap(transformCountByState, StreamOutput::writeString, StreamOutput::writeLong); - out.writeMap(transformCountByFeature, StreamOutput::writeString, StreamOutput::writeLong); + out.writeMap(transformCountByState, StreamOutput::writeLong); + out.writeMap(transformCountByFeature, StreamOutput::writeLong); accumulatedStats.writeTo(out); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMetadata.java index bdbb5b5ff5371..e54cf2edb2690 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMetadata.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.transform; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.NamedDiff; @@ -54,7 +55,7 @@ public boolean isResetMode() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.MINIMUM_COMPATIBLE; + return TransportVersions.MINIMUM_COMPATIBLE; } @Override @@ -120,7 +121,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.MINIMUM_COMPATIBLE; + return TransportVersions.MINIMUM_COMPATIBLE; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/DeleteTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/DeleteTransformAction.java index 96c3312dfeb0e..6f7cc7cf1eea6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/DeleteTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/DeleteTransformAction.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.transform.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -45,7 +45,7 @@ public Request(StreamInput in) throws IOException { super(in); id = in.readString(); force = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { deleteDestIndex = in.readBoolean(); } else { deleteDestIndex = false; @@ -69,7 +69,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(id); out.writeBoolean(force); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeBoolean(deleteDestIndex); } } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetCheckpointAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetCheckpointAction.java index e0c2598963554..1829e82758d3f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetCheckpointAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetCheckpointAction.java @@ -121,7 +121,7 @@ public Response(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(getCheckpoints(), StreamOutput::writeString, StreamOutput::writeLongArray); + out.writeMap(getCheckpoints(), StreamOutput::writeLongArray); } public Map getCheckpoints() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetCheckpointNodeAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetCheckpointNodeAction.java index dec098f0e5a96..ebc34211040c9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetCheckpointNodeAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetCheckpointNodeAction.java @@ -49,7 +49,7 @@ public Response(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(getCheckpoints(), StreamOutput::writeString, StreamOutput::writeLongArray); + out.writeMap(getCheckpoints(), StreamOutput::writeLongArray); } public Map getCheckpoints() { @@ -94,7 +94,7 @@ public Request(Set shards, OriginalIndices originalIndices) { public Request(StreamInput in) throws IOException { super(in); - this.shards = in.readImmutableSet(ShardId::new); + this.shards = in.readCollectionAsImmutableSet(ShardId::new); this.originalIndices = OriginalIndices.readOriginalIndices(in); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformAction.java index aed2daf0c2a4e..737218607cb8b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformAction.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.transform.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.common.ValidationException; @@ -136,9 +136,9 @@ public Response(List transformConfigs, long count, List public Response(StreamInput in) throws IOException { super(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { if (in.readBoolean()) { - this.errors = in.readList(Error::new); + this.errors = in.readCollectionAsList(Error::new); } else { this.errors = null; } @@ -197,10 +197,10 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { if (errors != null) { 
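The reader renames in these hunks follow a single convention: readList becomes readCollectionAsList, readImmutableList becomes readCollectionAsImmutableList, and readImmutableSet becomes readCollectionAsImmutableSet, spelling out that a length-prefixed collection is being decoded. A round-trip sketch of that convention; FakeItem is a hypothetical Writeable invented here for illustration:

import java.io.IOException;
import java.util.List;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

// Hypothetical payload type, not part of the patch.
record FakeItem(String name) implements Writeable {
    FakeItem(StreamInput in) throws IOException {
        this(in.readString());
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(name);
    }

    static void writeItems(StreamOutput out, List<FakeItem> items) throws IOException {
        out.writeCollection(items); // replaces out.writeList(items)
    }

    static List<FakeItem> readItems(StreamInput in) throws IOException {
        return in.readCollectionAsList(FakeItem::new); // replaces in.readList(FakeItem::new)
    }
}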
out.writeBoolean(true); - out.writeList(errors); + out.writeCollection(errors); } else { out.writeBoolean(false); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformStatsAction.java index f57b4529e996a..31a577dd43a43 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/GetTransformStatsAction.java @@ -71,7 +71,7 @@ public Request(String id, @Nullable TimeValue timeout) { public Request(StreamInput in) throws IOException { super(in); id = in.readString(); - expandedIds = in.readImmutableList(StreamInput::readString); + expandedIds = in.readCollectionAsImmutableList(StreamInput::readString); pageParams = new PageParams(in); allowNoMatch = in.readBoolean(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StartTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StartTransformAction.java index 12979a38f8601..e5d11e8bc4c47 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StartTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StartTransformAction.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.transform.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.master.AcknowledgedRequest; @@ -48,7 +48,7 @@ public Request(String id, Instant from, TimeValue timeout) { public Request(StreamInput in) throws IOException { super(in); id = in.readString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { from = in.readOptionalInstant(); } else { from = null; @@ -67,7 +67,7 @@ public Instant from() { public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeString(id); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { out.writeOptionalInstant(from); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java index fc7cfaf9aa450..b9852e1f2a0d0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/StopTransformAction.java @@ -121,7 +121,7 @@ public void writeTo(StreamOutput out) throws IOException { boolean hasExpandedIds = expandedIds != null; out.writeBoolean(hasExpandedIds); if (hasExpandedIds) { - out.writeStringArray(expandedIds.toArray(new String[0])); + out.writeStringCollection(expandedIds); } out.writeBoolean(allowNoMatch); out.writeBoolean(waitForCheckpoint); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/UpdateTransformAction.java 
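The StopTransformAction change is the one spot where the shape of the call changes rather than just its name: out.writeStringArray(expandedIds.toArray(new String[0])) becomes out.writeStringCollection(expandedIds). Both helpers appear to emit the same length-prefixed layout, a vInt size followed by the strings, which is presumably why the swap is safe on the wire; the new form simply skips the intermediate array copy. Condensed:

// Before: copy the list into an array just to serialize it.
out.writeStringArray(expandedIds.toArray(new String[0]));

// After: serialize the collection directly; same bytes, no copy.
out.writeStringCollection(expandedIds);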
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/UpdateTransformAction.java index be99bc7d2106c..6ecdd6519714b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/UpdateTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/UpdateTransformAction.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.transform.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.support.tasks.BaseTasksRequest; @@ -65,7 +65,7 @@ public Request(StreamInput in) throws IOException { if (in.readBoolean()) { this.config = new TransformConfig(in); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { if (in.readBoolean()) { this.authState = new AuthorizationState(in); } @@ -152,7 +152,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(true); config.writeTo(out); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { if (authState == null) { out.writeBoolean(false); } else { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/ValidateTransformAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/ValidateTransformAction.java index 3b5163dbf4c2d..34d737a4745db 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/ValidateTransformAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/action/ValidateTransformAction.java @@ -110,7 +110,7 @@ public Response(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(destIndexMappings, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(destIndexMappings, StreamOutput::writeString); } public Map getDestIndexMappings() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/DestConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/DestConfig.java index 6f8c243ed3752..b306f13d19a63 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/DestConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/DestConfig.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.transform.transforms; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -64,8 +64,8 @@ public DestConfig(String index, List aliases, String pipeline) { public DestConfig(final StreamInput in) throws IOException { index = in.readString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { - aliases = in.readOptionalList(DestAlias::new); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { + aliases = in.readOptionalCollectionAsList(DestAlias::new); } else { aliases = null; } @@ -96,7 +96,7 @@ public void checkForDeprecations(String id, NamedXContentRegistry 
namedXContentR @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(index); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeOptionalCollection(aliases); } out.writeOptionalString(pipeline); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/NodeAttributes.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/NodeAttributes.java index 3cf91be019b60..c0e903d99f113 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/NodeAttributes.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/NodeAttributes.java @@ -164,6 +164,6 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(name); out.writeString(ephemeralId); out.writeString(transportAddress); - out.writeMap(attributes, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(attributes, StreamOutput::writeString); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java index 441882cd53f9d..9b0fa3876819b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/SettingsConfig.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.transform.transforms; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -163,17 +163,17 @@ public SettingsConfig(final StreamInput in) throws IOException { this.alignCheckpoints = in.readOptionalInt(); this.usePit = in.readOptionalInt(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { deduceMappings = in.readOptionalInt(); } else { deduceMappings = DEFAULT_DEDUCE_MAPPINGS; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { numFailureRetries = in.readOptionalInt(); } else { numFailureRetries = null; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_5_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) { unattended = in.readOptionalInt(); } else { unattended = DEFAULT_UNATTENDED; @@ -279,13 +279,13 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalInt(alignCheckpoints); out.writeOptionalInt(usePit); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_1_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_1_0)) { out.writeOptionalInt(deduceMappings); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_4_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { out.writeOptionalInt(numFailureRetries); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_5_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_5_0)) { out.writeOptionalInt(unattended); } } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java index f80065fc37fd4..767b83f264184 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfig.java @@ -411,7 +411,7 @@ public void writeTo(final StreamOutput out) throws IOException { source.writeTo(out); dest.writeTo(out); out.writeOptionalTimeValue(frequency); - out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(headers, StreamOutput::writeString); out.writeOptionalWriteable(pivotConfig); out.writeOptionalWriteable(latestConfig); out.writeOptionalString(description); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdate.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdate.java index d6e142a00fcc0..c3f4d3d382547 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdate.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformConfigUpdate.java @@ -174,7 +174,7 @@ public void writeTo(final StreamOutput out) throws IOException { out.writeOptionalNamedWriteable(syncConfig); if (headers != null) { out.writeBoolean(true); - out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(headers, StreamOutput::writeString); } else { out.writeBoolean(false); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformDestIndexSettings.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformDestIndexSettings.java index c967c2177a819..e202ab37bdc02 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformDestIndexSettings.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformDestIndexSettings.java @@ -77,7 +77,7 @@ public TransformDestIndexSettings(Map mappings, Settings setting public TransformDestIndexSettings(StreamInput in) throws IOException { mappings = in.readMap(); settings = Settings.readSettingsFromStream(in); - aliases = new HashSet<>(in.readList(Alias::new)); + aliases = new HashSet<>(in.readCollectionAsList(Alias::new)); } public Map getMappings() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformHealth.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformHealth.java index 8e72548597391..3cbcddeae4069 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformHealth.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformHealth.java @@ -38,7 +38,7 @@ public TransformHealth(HealthStatus status, List issues) { public TransformHealth(StreamInput in) throws IOException { this.status = in.readEnum(HealthStatus.class); - this.issues = in.readOptionalList(TransformHealthIssue::new); + this.issues = in.readOptionalCollectionAsList(TransformHealthIssue::new); } public HealthStatus getStatus() { diff --git 
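DestConfig and TransformHealth above show the optional-collection pairing: writeOptionalCollection on the way out, readOptionalCollectionAsList (formerly readOptionalList) on the way in, with the null flag and the size handled inside the helpers. A hedged sketch of the pairing, reusing the DestAlias type from the patch; the wrapper method names are invented:

import java.io.IOException;
import java.util.List;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.core.Nullable;

static void writeAliases(StreamOutput out, @Nullable List<DestAlias> aliases) throws IOException {
    out.writeOptionalCollection(aliases); // writes a presence flag, then size plus elements
}

static List<DestAlias> readAliases(StreamInput in) throws IOException {
    return in.readOptionalCollectionAsList(DestAlias::new); // was in.readOptionalList(DestAlias::new)
}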
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformHealthIssue.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformHealthIssue.java index d53df56e5d9a3..5697e1793f0b0 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformHealthIssue.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformHealthIssue.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.transform.transforms; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -49,7 +49,7 @@ public TransformHealthIssue(String type, String issue, String details, int count } public TransformHealthIssue(StreamInput in) throws IOException { - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { this.type = in.readString(); } else { this.type = DEFAULT_TYPE_PRE_8_8; @@ -97,7 +97,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeString(type); } out.writeString(issue); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformState.java index f09291d5a9a7d..2508b5e1bf01b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformState.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.transform.transforms; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -131,7 +131,7 @@ public TransformState( public TransformState(StreamInput in) throws IOException { taskState = TransformTaskState.fromStream(in); indexerState = IndexerState.fromStream(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { position = in.readOptionalWriteable(TransformIndexerPosition::new); } else { Map pos = in.readMap(); @@ -140,17 +140,17 @@ public TransformState(StreamInput in) throws IOException { checkpoint = in.readLong(); reason = in.readOptionalString(); progress = in.readOptionalWriteable(TransformProgress::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { node = in.readOptionalWriteable(NodeAttributes::new); } else { node = null; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) { shouldStopAtNextCheckpoint = in.readBoolean(); } else { shouldStopAtNextCheckpoint = false; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if 
(in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { authState = in.readOptionalWriteable(AuthorizationState::new); } else { authState = null; @@ -241,7 +241,7 @@ public String getWriteableName() { public void writeTo(StreamOutput out) throws IOException { taskState.writeTo(out); indexerState.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { out.writeOptionalWriteable(position); } else { out.writeGenericMap(position != null ? position.getIndexerPosition() : null); @@ -249,13 +249,13 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(checkpoint); out.writeOptionalString(reason); out.writeOptionalWriteable(progress); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_3_0)) { out.writeOptionalWriteable(node); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_6_0)) { out.writeBoolean(shouldStopAtNextCheckpoint); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) { out.writeOptionalWriteable(authState); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformStats.java index e7ea69fae493e..74b61d24bed41 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformStats.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformStats.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.transform.transforms; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -90,7 +90,7 @@ public TransformStats(StreamInput in) throws IOException { this.indexerStats = new TransformIndexerStats(in); this.checkpointingInfo = new TransformCheckpointingInfo(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { if (in.readBoolean()) { this.health = new TransformHealth(in); } else { @@ -134,7 +134,7 @@ public void writeTo(StreamOutput out) throws IOException { } indexerStats.writeTo(out); checkpointingInfo.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { if (health != null) { out.writeBoolean(true); health.writeTo(out); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformTaskParams.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformTaskParams.java index 613a595a86195..9b49b58b0d6ec 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformTaskParams.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/TransformTaskParams.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.transform.transforms; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import 
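TransformState condenses the migration into the one pattern that matters for compatibility: every read gate supplies a default for older peers, and the write side must test exactly the same version constant, otherwise the two ends of the stream desynchronize. Extracted from the hunks above, with added comments:

// Read side: fall back to a default when the remote node predates the field.
if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
    authState = in.readOptionalWriteable(AuthorizationState::new);
} else {
    authState = null; // pre-8.8 peers never sent this field
}

// Write side: gate on the identical constant so the bytes line up.
if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0)) {
    out.writeOptionalWriteable(authState);
}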
org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -76,7 +77,7 @@ public TransformTaskParams(String transformId, TransformConfigVersion version, I public TransformTaskParams(StreamInput in) throws IOException { this.transformId = in.readString(); this.version = TransformConfigVersion.readVersion(in); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { this.from = in.readOptionalInstant(); } else { this.from = null; @@ -92,14 +93,14 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_17_0; + return TransportVersions.V_7_17_0; } @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(transformId); TransformConfigVersion.writeVersion(version, out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { out.writeOptionalInstant(from); } out.writeOptionalTimeValue(frequency); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/latest/LatestConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/latest/LatestConfig.java index 9f27cf4f9012a..d1e4308b515b2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/latest/LatestConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/latest/LatestConfig.java @@ -71,7 +71,7 @@ public LatestConfig(List uniqueKey, String sort) { } public LatestConfig(StreamInput in) throws IOException { - this.uniqueKey = in.readStringList(); + this.uniqueKey = in.readStringCollectionAsList(); this.sort = in.readString(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java index b36a6a447b6b2..3d21a7f2f14bb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSource.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.transform.transforms.pivot; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -228,7 +228,7 @@ public DateHistogramGroupSource(StreamInput in) throws IOException { super(in); this.interval = readInterval(in); this.timeZone = in.readOptionalZoneId(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { this.offset = in.readLong(); } else { this.offset = 0; @@ -331,7 +331,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); writeInterval(interval, out); out.writeOptionalZoneId(timeZone); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { out.writeLong(offset); } } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/GroupConfig.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/GroupConfig.java index 7cf8f7649b41e..4293595c711ef 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/GroupConfig.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/transforms/pivot/GroupConfig.java @@ -93,7 +93,7 @@ public void checkForDeprecations(String id, NamedXContentRegistry namedXContentR @Override public void writeTo(StreamOutput out) throws IOException { out.writeGenericMap(source); - out.writeMap(groups, StreamOutput::writeString, (stream, value) -> { + out.writeMap(groups, (stream, value) -> { stream.writeByte(value.getType().getId()); value.writeTo(stream); }); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/votingonly/VotingOnlyNodeFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/votingonly/VotingOnlyNodeFeatureSetUsage.java index 78ca1035b57ae..cf9d2499b60b7 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/votingonly/VotingOnlyNodeFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/votingonly/VotingOnlyNodeFeatureSetUsage.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.votingonly; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.xpack.core.XPackFeatureSet; import org.elasticsearch.xpack.core.XPackField; @@ -24,7 +25,7 @@ public VotingOnlyNodeFeatureSetUsage() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_3_0; + return TransportVersions.V_7_3_0; } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherFeatureSetUsage.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherFeatureSetUsage.java index b7db037fde49b..18ffeeb9a3206 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherFeatureSetUsage.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherFeatureSetUsage.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.watcher; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.xcontent.XContentBuilder; @@ -32,7 +33,7 @@ public WatcherFeatureSetUsage(boolean available, boolean enabled, Map stats() { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetadata.java index 918d9840a5fb1..994bb8b75178e 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetadata.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.watcher; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.Metadata; @@ -43,7 +44,7 @@ public String getWriteableName() { @Override public 
TransportVersion getMinimalSupportedVersion() { - return TransportVersion.MINIMUM_COMPATIBLE; + return TransportVersions.MINIMUM_COMPATIBLE; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionSnapshot.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionSnapshot.java index 8271892dcaab3..2b80c32f3c327 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionSnapshot.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/execution/WatchExecutionSnapshot.java @@ -112,11 +112,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws builder.timeField("execution_time", executionTime); builder.field("execution_phase", phase); if (executedActions != null) { - builder.startArray("executed_actions"); - for (String executedAction : executedActions) { - builder.value(executedAction); - } - builder.endArray(); + builder.array("executed_actions", executedActions); } if (params.paramAsBoolean("emit_stacktraces", false)) { builder.startArray("stack_trace"); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/QueryWatchesAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/QueryWatchesAction.java index 4bc5ab2fbc71e..70ffdc98831f8 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/QueryWatchesAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/QueryWatchesAction.java @@ -106,7 +106,7 @@ public Request(StreamInput in) throws IOException { size = in.readOptionalVInt(); query = in.readOptionalNamedWriteable(QueryBuilder.class); if (in.readBoolean()) { - sorts = in.readList(FieldSortBuilder::new); + sorts = in.readCollectionAsList(FieldSortBuilder::new); } else { sorts = null; } @@ -154,7 +154,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeOptionalNamedWriteable(query); if (sorts != null) { out.writeBoolean(true); - out.writeList(sorts); + out.writeCollection(sorts); } else { out.writeBoolean(false); } @@ -216,7 +216,7 @@ public Response(long watchTotalCount, List watches) { public Response(StreamInput in) throws IOException { super(in); - watches = in.readList(Item::new); + watches = in.readCollectionAsList(Item::new); watchTotalCount = in.readVLong(); } @@ -230,7 +230,7 @@ public long getWatchTotalCount() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(watches); + out.writeCollection(watches); out.writeVLong(watchTotalCount); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsResponse.java index c8838875cc2d8..1ac98cd42d2e3 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/transport/actions/stats/WatcherStatsResponse.java @@ -54,12 +54,12 @@ public void writeTo(StreamOutput out) throws IOException { @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(Node::new); + return in.readCollectionAsList(Node::new); } @Override protected void 
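The WatchExecutionSnapshot hunk is a pure readability change: XContentBuilder.array replaces a manual startArray/value/endArray loop and should produce identical JSON. Side by side:

// Before: emit each element by hand.
builder.startArray("executed_actions");
for (String executedAction : executedActions) {
    builder.value(executedAction);
}
builder.endArray();

// After: one call, same output.
builder.array("executed_actions", executedActions);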
writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } @Override @@ -105,10 +105,10 @@ public Node(StreamInput in) throws IOException { watcherState = WatcherState.fromId(in.readByte()); if (in.readBoolean()) { - snapshots = in.readList(WatchExecutionSnapshot::new); + snapshots = in.readCollectionAsList(WatchExecutionSnapshot::new); } if (in.readBoolean()) { - queuedWatches = in.readList(QueuedWatch::new); + queuedWatches = in.readCollectionAsList(QueuedWatch::new); } if (in.readBoolean()) { stats = new Counters(in); @@ -199,11 +199,11 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(snapshots != null); if (snapshots != null) { - out.writeList(snapshots); + out.writeCollection(snapshots); } out.writeBoolean(queuedWatches != null); if (queuedWatches != null) { - out.writeList(queuedWatches); + out.writeCollection(queuedWatches); } out.writeBoolean(stats != null); if (stats != null) { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java index 719c97e249113..bfca11aee2955 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/watch/WatchStatus.java @@ -248,7 +248,7 @@ public void writeTo(StreamOutput out) throws IOException { boolean statusHasHeaders = headers != null && headers.isEmpty() == false; out.writeBoolean(statusHasHeaders); if (statusHasHeaders) { - out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(headers, StreamOutput::writeString); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/GetFeatureUsageResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/GetFeatureUsageResponseTests.java index 3396dc8a0463c..cb29f2a85aa8b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/GetFeatureUsageResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/GetFeatureUsageResponseTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.license; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.license.GetFeatureUsageResponse.FeatureUsageInfo; @@ -46,7 +47,7 @@ public void assertStreamInputOutput(TransportVersion version, String family, Str } public void testPre715StreamFormat() throws IOException { - assertStreamInputOutput(TransportVersionUtils.getPreviousVersion(TransportVersion.V_7_15_0), null, null); + assertStreamInputOutput(TransportVersionUtils.getPreviousVersion(TransportVersions.V_7_15_0), null, null); } public void testStreamFormat() throws IOException { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoRequestTests.java index 9065915fa4201..4aea054c83f2d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/protocol/xpack/XPackInfoRequestTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.protocol.xpack; import org.elasticsearch.TransportVersion; +import 
org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.protocol.xpack.XPackInfoRequest.Category; import org.elasticsearch.test.ESTestCase; @@ -28,8 +29,8 @@ public void testSerializeUsing7xVersion() throws Exception { assertSerialization( TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_7_8_1, - TransportVersionUtils.getPreviousVersion(TransportVersion.V_8_0_0) + TransportVersions.V_7_8_1, + TransportVersionUtils.getPreviousVersion(TransportVersions.V_8_0_0) ) ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java index ffce4c9ec79a6..d1ef79f1d61b4 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java @@ -19,6 +19,8 @@ import org.elasticsearch.cluster.metadata.DesiredNodesMetadata; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; +import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodes; @@ -37,6 +39,7 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexModule; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; @@ -45,10 +48,13 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.elasticsearch.cluster.routing.allocation.DataTier.DATA_COLD; import static org.elasticsearch.cluster.routing.allocation.DataTier.DATA_FROZEN; @@ -62,12 +68,14 @@ public class DataTierAllocationDeciderTests extends ESAllocationTestCase { private static final DiscoveryNode HOT_NODE = newNode("node-hot", Collections.singleton(DiscoveryNodeRole.DATA_HOT_NODE_ROLE)); private static final DiscoveryNode WARM_NODE = newNode("node-warm", Collections.singleton(DiscoveryNodeRole.DATA_WARM_NODE_ROLE)); + private static final DiscoveryNode WARM_NODE_TWO = newNode("node-warm-2", Collections.singleton(DiscoveryNodeRole.DATA_WARM_NODE_ROLE)); private static final DiscoveryNode COLD_NODE = newNode("node-cold", Collections.singleton(DiscoveryNodeRole.DATA_COLD_NODE_ROLE)); private static final DiscoveryNode CONTENT_NODE = newNode( "node-content", Collections.singleton(DiscoveryNodeRole.DATA_CONTENT_NODE_ROLE) ); private static final DiscoveryNode DATA_NODE = newNode("node-data", Collections.singleton(DiscoveryNodeRole.DATA_ROLE)); + private static final DiscoveryNode DATA_NODE_TWO = newNode("node-data-2", Collections.singleton(DiscoveryNodeRole.DATA_ROLE)); private static final DesiredNode HOT_DESIRED_NODE = newDesiredNode("node-hot", DiscoveryNodeRole.DATA_HOT_NODE_ROLE); private 
static final DesiredNode WARM_DESIRED_NODE = newDesiredNode("node-warm", DiscoveryNodeRole.DATA_WARM_NODE_ROLE); @@ -202,27 +210,110 @@ public void testIndexPrefer() { public void testTierNodesPresent() { DiscoveryNodes nodes = DiscoveryNodes.builder().build(); - assertFalse(DataTierAllocationDecider.tierNodesPresent("data", nodes)); - assertFalse(DataTierAllocationDecider.tierNodesPresent("data_hot", nodes)); - assertFalse(DataTierAllocationDecider.tierNodesPresent("data_warm", nodes)); - assertFalse(DataTierAllocationDecider.tierNodesPresent("data_cold", nodes)); - assertFalse(DataTierAllocationDecider.tierNodesPresent("data_content", nodes)); + assertFalse(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data", nodes, irrelevantNodeIds(nodes))); + assertFalse(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data_hot", nodes, irrelevantNodeIds(nodes))); + assertFalse(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data_warm", nodes, irrelevantNodeIds(nodes))); + assertFalse(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data_cold", nodes, irrelevantNodeIds(nodes))); + assertFalse(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data_content", nodes, irrelevantNodeIds(nodes))); nodes = DiscoveryNodes.builder().add(WARM_NODE).add(CONTENT_NODE).build(); - assertFalse(DataTierAllocationDecider.tierNodesPresent("data", nodes)); - assertFalse(DataTierAllocationDecider.tierNodesPresent("data_hot", nodes)); - assertTrue(DataTierAllocationDecider.tierNodesPresent("data_warm", nodes)); - assertFalse(DataTierAllocationDecider.tierNodesPresent("data_cold", nodes)); - assertTrue(DataTierAllocationDecider.tierNodesPresent("data_content", nodes)); + assertFalse(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data", nodes, irrelevantNodeIds(nodes))); + assertFalse(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data_hot", nodes, irrelevantNodeIds(nodes))); + assertTrue(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data_warm", nodes, irrelevantNodeIds(nodes))); + assertFalse(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data_cold", nodes, irrelevantNodeIds(nodes))); + assertTrue(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data_content", nodes, irrelevantNodeIds(nodes))); nodes = DiscoveryNodes.builder().add(DATA_NODE).build(); - assertTrue(DataTierAllocationDecider.tierNodesPresent("data", nodes)); - assertTrue(DataTierAllocationDecider.tierNodesPresent("data_hot", nodes)); - assertTrue(DataTierAllocationDecider.tierNodesPresent("data_warm", nodes)); - assertTrue(DataTierAllocationDecider.tierNodesPresent("data_cold", nodes)); - assertTrue(DataTierAllocationDecider.tierNodesPresent("data_content", nodes)); + assertTrue(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data", nodes, irrelevantNodeIds(nodes))); + assertTrue(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data_hot", nodes, irrelevantNodeIds(nodes))); + assertTrue(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data_warm", nodes, irrelevantNodeIds(nodes))); + assertTrue(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data_cold", nodes, irrelevantNodeIds(nodes))); + assertTrue(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data_content", nodes, irrelevantNodeIds(nodes))); + } + + public void testTierNodesPresentWithRelevantNodeShutdowns() { + { + DiscoveryNodes nodes = 
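The rewritten assertions target tierNodesPresentConsideringRemovals, which differs from the old tierNodesPresent by discounting tier nodes that are on their way out of the cluster. The property the tests pin down is roughly the following; this reimplementation is illustrative only, invented here rather than copied from the decider:

import java.util.Set;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeRole;
import org.elasticsearch.cluster.node.DiscoveryNodes;

// A tier counts as present if at least one node that can host it is not being removed.
static boolean tierPresent(String tier, DiscoveryNodes nodes, Set<String> removingNodeIds) {
    for (DiscoveryNode node : nodes) {
        // Generic "data" nodes satisfy every tier; otherwise the node needs the tier's role.
        boolean inTier = node.hasRole(DiscoveryNodeRole.DATA_ROLE.roleName()) || node.hasRole(tier);
        if (inTier && removingNodeIds.contains(node.getId()) == false) {
            return true;
        }
    }
    return false;
}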
DiscoveryNodes.builder().add(HOT_NODE).add(WARM_NODE).add(DATA_NODE).build(); + + assertTrue(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data_hot", nodes, Set.of(HOT_NODE.getId()))); + assertFalse( + DataTierAllocationDecider.tierNodesPresentConsideringRemovals( + "data_hot", + nodes, + Set.of(HOT_NODE.getId(), DATA_NODE.getId()) + ) + ); + + assertTrue( + DataTierAllocationDecider.tierNodesPresentConsideringRemovals( + "data_warm", + nodes, + Set.of(HOT_NODE.getId(), DATA_NODE.getId()) + ) + ); + assertFalse( + DataTierAllocationDecider.tierNodesPresentConsideringRemovals( + "data_warm", + nodes, + Set.of(HOT_NODE.getId(), WARM_NODE.getId(), DATA_NODE.getId()) + ) + ); + + assertTrue(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data_cold", nodes, Set.of(HOT_NODE.getId()))); + assertFalse( + DataTierAllocationDecider.tierNodesPresentConsideringRemovals( + "data_cold", + nodes, + Set.of(HOT_NODE.getId(), DATA_NODE.getId()) + ) + ); + } + + { + DiscoveryNodes onlyTierNodes = DiscoveryNodes.builder().add(HOT_NODE).add(WARM_NODE).add(WARM_NODE_TWO).build(); + assertFalse(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data_hot", onlyTierNodes, Set.of(HOT_NODE.getId()))); + assertTrue( + DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data_warm", onlyTierNodes, Set.of(WARM_NODE.getId())) + ); + assertFalse( + DataTierAllocationDecider.tierNodesPresentConsideringRemovals( + "data_warm", + onlyTierNodes, + Set.of(WARM_NODE.getId(), WARM_NODE_TWO.getId()) + ) + ); + } + + { + DiscoveryNodes nodes = DiscoveryNodes.builder().add(HOT_NODE).add(DATA_NODE).add(DATA_NODE_TWO).build(); + assertTrue(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data_hot", nodes, Set.of(HOT_NODE.getId()))); + assertTrue( + DataTierAllocationDecider.tierNodesPresentConsideringRemovals( + "data_hot", + nodes, + Set.of(HOT_NODE.getId(), DATA_NODE.getId()) + ) + ); + assertTrue( + DataTierAllocationDecider.tierNodesPresentConsideringRemovals( + "data_hot", + nodes, + Set.of(HOT_NODE.getId(), DATA_NODE_TWO.getId()) + ) + ); + assertTrue(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data_warm", nodes, Set.of(DATA_NODE.getId()))); + assertTrue(DataTierAllocationDecider.tierNodesPresentConsideringRemovals("data_warm", nodes, Set.of(DATA_NODE_TWO.getId()))); + assertFalse( + DataTierAllocationDecider.tierNodesPresentConsideringRemovals( + "data_warm", + nodes, + Set.of(DATA_NODE.getId(), DATA_NODE_TWO.getId()) + ) + ); + + } } public void testTierNodesPresentDesiredNodes() { @@ -259,19 +350,47 @@ public void testPreferredTierAvailable() { : createDesiredNodesWithPendingNodes(HOT_DESIRED_NODE, WARM_DESIRED_NODE, COLD_DESIRED_NODE); assertThat( - DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data"), nodes, desiredNodes), + DataTierAllocationDecider.preferredAvailableTier( + DataTier.parseTierList("data"), + nodes, + desiredNodes, + desiredNodes == null + ? irrelevantNodesShutdownMetadata(nodes) + : nodesShutdownMetadataForDesiredNodesTests(desiredNodes, nodes) + ), equalTo(Optional.empty()) ); assertThat( - DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data_hot,data_warm"), nodes, desiredNodes), + DataTierAllocationDecider.preferredAvailableTier( + DataTier.parseTierList("data_hot,data_warm"), + nodes, + desiredNodes, + desiredNodes == null + ? 
irrelevantNodesShutdownMetadata(nodes) + : nodesShutdownMetadataForDesiredNodesTests(desiredNodes, nodes) + ), equalTo(Optional.empty()) ); assertThat( - DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data_warm,data_content"), nodes, desiredNodes), + DataTierAllocationDecider.preferredAvailableTier( + DataTier.parseTierList("data_warm,data_content"), + nodes, + desiredNodes, + desiredNodes == null + ? irrelevantNodesShutdownMetadata(nodes) + : nodesShutdownMetadataForDesiredNodesTests(desiredNodes, nodes) + ), equalTo(Optional.empty()) ); assertThat( - DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data_cold"), nodes, desiredNodes), + DataTierAllocationDecider.preferredAvailableTier( + DataTier.parseTierList("data_cold"), + nodes, + desiredNodes, + desiredNodes == null + ? irrelevantNodesShutdownMetadata(nodes) + : nodesShutdownMetadataForDesiredNodesTests(desiredNodes, nodes) + ), equalTo(Optional.empty()) ); } @@ -283,26 +402,57 @@ public void testPreferredTierAvailable() { : createDesiredNodesWithActualizedNodes(WARM_DESIRED_NODE, CONTENT_DESIRED_NODE); assertThat( - DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data"), nodes, desiredNodes), + DataTierAllocationDecider.preferredAvailableTier( + DataTier.parseTierList("data"), + nodes, + desiredNodes, + desiredNodes == null + ? irrelevantNodesShutdownMetadata(nodes) + : nodesShutdownMetadataForDesiredNodesTests(desiredNodes, nodes) + ), equalTo(Optional.empty()) ); assertThat( - DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data_hot,data_warm"), nodes, desiredNodes), + DataTierAllocationDecider.preferredAvailableTier( + DataTier.parseTierList("data_hot,data_warm"), + nodes, + desiredNodes, + desiredNodes == null + ? irrelevantNodesShutdownMetadata(nodes) + : nodesShutdownMetadataForDesiredNodesTests(desiredNodes, nodes) + ), equalTo(Optional.of("data_warm")) ); assertThat( - DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data_warm,data_content"), nodes, desiredNodes), + DataTierAllocationDecider.preferredAvailableTier( + DataTier.parseTierList("data_warm,data_content"), + nodes, + desiredNodes, + desiredNodes == null + ? irrelevantNodesShutdownMetadata(nodes) + : nodesShutdownMetadataForDesiredNodesTests(desiredNodes, nodes) + ), equalTo(Optional.of("data_warm")) ); assertThat( - DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data_content,data_warm"), nodes, desiredNodes), + DataTierAllocationDecider.preferredAvailableTier( + DataTier.parseTierList("data_content,data_warm"), + nodes, + desiredNodes, + desiredNodes == null + ? irrelevantNodesShutdownMetadata(nodes) + : nodesShutdownMetadataForDesiredNodesTests(desiredNodes, nodes) + ), equalTo(Optional.of("data_content")) ); assertThat( DataTierAllocationDecider.preferredAvailableTier( DataTier.parseTierList("data_hot,data_content,data_warm"), nodes, - desiredNodes + desiredNodes, + desiredNodes == null + ? irrelevantNodesShutdownMetadata(nodes) + : nodesShutdownMetadataForDesiredNodesTests(desiredNodes, nodes) ), equalTo(Optional.of("data_content")) ); @@ -310,7 +460,10 @@ public void testPreferredTierAvailable() { DataTierAllocationDecider.preferredAvailableTier( DataTier.parseTierList("data_hot,data_cold,data_warm"), nodes, - desiredNodes + desiredNodes, + desiredNodes == null + ? 
irrelevantNodesShutdownMetadata(nodes) + : nodesShutdownMetadataForDesiredNodesTests(desiredNodes, nodes) ), equalTo(Optional.of("data_warm")) ); @@ -319,28 +472,45 @@ public void testPreferredTierAvailable() { { final var nodes = DiscoveryNodes.builder().add(WARM_NODE).add(CONTENT_NODE).build(); final var desiredNodes = createDesiredNodesWithActualizedNodes(HOT_DESIRED_NODE, WARM_DESIRED_NODE, CONTENT_DESIRED_NODE); + final var shutdownMetadata = irrelevantNodesShutdownMetadata(nodes); assertThat( - DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data"), nodes, desiredNodes), + DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data"), nodes, desiredNodes, shutdownMetadata), equalTo(Optional.empty()) ); assertThat( - DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data_hot,data_warm"), nodes, desiredNodes), + DataTierAllocationDecider.preferredAvailableTier( + DataTier.parseTierList("data_hot,data_warm"), + nodes, + desiredNodes, + shutdownMetadata + ), equalTo(Optional.of("data_hot")) ); assertThat( - DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data_warm,data_content"), nodes, desiredNodes), + DataTierAllocationDecider.preferredAvailableTier( + DataTier.parseTierList("data_warm,data_content"), + nodes, + desiredNodes, + shutdownMetadata + ), equalTo(Optional.of("data_warm")) ); assertThat( - DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data_content,data_warm"), nodes, desiredNodes), + DataTierAllocationDecider.preferredAvailableTier( + DataTier.parseTierList("data_content,data_warm"), + nodes, + desiredNodes, + shutdownMetadata + ), equalTo(Optional.of("data_content")) ); assertThat( DataTierAllocationDecider.preferredAvailableTier( DataTier.parseTierList("data_hot,data_content,data_warm"), nodes, - desiredNodes + desiredNodes, + shutdownMetadata ), equalTo(Optional.of("data_hot")) ); @@ -348,7 +518,8 @@ public void testPreferredTierAvailable() { DataTierAllocationDecider.preferredAvailableTier( DataTier.parseTierList("data_hot,data_cold,data_warm"), nodes, - desiredNodes + desiredNodes, + shutdownMetadata ), equalTo(Optional.of("data_hot")) ); @@ -367,28 +538,45 @@ public void testPreferredTierAvailable() { actualizedDesiredNode(CONTENT_DESIRED_NODE) ) ); + final var shutdownMetadata = irrelevantNodesShutdownMetadata(nodes); assertThat( - DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data"), nodes, desiredNodes), + DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data"), nodes, desiredNodes, shutdownMetadata), equalTo(Optional.empty()) ); assertThat( - DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data_hot,data_warm"), nodes, desiredNodes), + DataTierAllocationDecider.preferredAvailableTier( + DataTier.parseTierList("data_hot,data_warm"), + nodes, + desiredNodes, + shutdownMetadata + ), equalTo(Optional.of("data_warm")) ); assertThat( - DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data_warm,data_content"), nodes, desiredNodes), + DataTierAllocationDecider.preferredAvailableTier( + DataTier.parseTierList("data_warm,data_content"), + nodes, + desiredNodes, + shutdownMetadata + ), equalTo(Optional.of("data_warm")) ); assertThat( - DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data_content,data_warm"), nodes, desiredNodes), + DataTierAllocationDecider.preferredAvailableTier( + DataTier.parseTierList("data_content,data_warm"), + 
nodes, + desiredNodes, + shutdownMetadata + ), equalTo(Optional.of("data_content")) ); assertThat( DataTierAllocationDecider.preferredAvailableTier( DataTier.parseTierList("data_hot,data_content,data_warm"), nodes, - desiredNodes + desiredNodes, + shutdownMetadata ), equalTo(Optional.of("data_content")) ); @@ -396,7 +584,8 @@ public void testPreferredTierAvailable() { DataTierAllocationDecider.preferredAvailableTier( DataTier.parseTierList("data_hot,data_cold,data_warm"), nodes, - desiredNodes + desiredNodes, + shutdownMetadata ), equalTo(Optional.of("data_warm")) ); @@ -406,9 +595,15 @@ public void testPreferredTierAvailable() { // Cold tier is planned to be removed final var nodes = DiscoveryNodes.builder().add(HOT_NODE).add(WARM_NODE).add(COLD_NODE).build(); final var desiredNodes = createDesiredNodesWithActualizedNodes(HOT_DESIRED_NODE, WARM_DESIRED_NODE); + final var shutdownMetadata = irrelevantNodesShutdownMetadata(nodes); assertThat( - DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data_cold,data_warm"), nodes, desiredNodes), + DataTierAllocationDecider.preferredAvailableTier( + DataTier.parseTierList("data_cold,data_warm"), + nodes, + desiredNodes, + shutdownMetadata + ), equalTo(Optional.of("data_warm")) ); } @@ -426,9 +621,15 @@ public void testPreferredTierAvailable() { pendingDesiredNode(COLD_DESIRED_NODE) ) ); + final var shutdownMetadata = irrelevantNodesShutdownMetadata(nodes); assertThat( - DataTierAllocationDecider.preferredAvailableTier(DataTier.parseTierList("data_cold,data_warm"), nodes, desiredNodes), + DataTierAllocationDecider.preferredAvailableTier( + DataTier.parseTierList("data_cold,data_warm"), + nodes, + desiredNodes, + shutdownMetadata + ), equalTo(Optional.of("data_cold")) ); } @@ -437,6 +638,7 @@ public void testPreferredTierAvailable() { // Ensure that when we are removing a tier and growing the next preferred tier we wait until all the new // nodes have joined the cluster avoiding filling the new nodes with shards from the removed tier final var nodes = DiscoveryNodes.builder().add(HOT_NODE).add(WARM_NODE).add(COLD_NODE).build(); + final var shutdownMetadata = irrelevantNodesShutdownMetadata(nodes); final DesiredNodes desiredNodes; // Grow any of the next preferred tiers if (randomBoolean()) { @@ -467,7 +669,8 @@ public void testPreferredTierAvailable() { DataTierAllocationDecider.preferredAvailableTier( DataTier.parseTierList("data_cold,data_warm,data_hot"), nodes, - desiredNodes + desiredNodes, + shutdownMetadata ), equalTo(Optional.of("data_cold")) ); @@ -483,13 +686,97 @@ public void testPreferredTierAvailable() { DataTierAllocationDecider.preferredAvailableTier( DataTier.parseTierList("data_cold,data_warm,data_hot"), nodes, - updatedDesiredNodes + updatedDesiredNodes, + shutdownMetadata ), equalTo(Optional.of("data_warm")) ); } } + public void testDataTierDeciderConsidersNodeShutdown() { + final var nodes = DiscoveryNodes.builder().add(HOT_NODE).add(WARM_NODE).add(COLD_NODE).build(); + final DesiredNodes desiredNodes = null; // Desired nodes will take precedence over node shutdown if it is present + + assertThat( + DataTierAllocationDecider.preferredAvailableTier( + DataTier.parseTierList("data_warm,data_cold,data_hot"), + nodes, + desiredNodes, + new NodesShutdownMetadata(Map.of(WARM_NODE.getId(), randomShutdownMetadataRemovingNode(WARM_NODE.getId()))) + ), + equalTo(Optional.of("data_cold")) + ); + + assertThat( + DataTierAllocationDecider.preferredAvailableTier( + DataTier.parseTierList("data_warm,data_cold,data_hot"), + 
nodes, + desiredNodes, + new NodesShutdownMetadata( + Map.of( + WARM_NODE.getId(), + randomShutdownMetadataRemovingNode(WARM_NODE.getId()), + COLD_NODE.getId(), + randomShutdownMetadataRemovingNode(COLD_NODE.getId()) + ) + ) + ), + equalTo(Optional.of("data_hot")) + ); + + assertThat( + DataTierAllocationDecider.preferredAvailableTier( + DataTier.parseTierList("data_warm,data_cold,data_hot"), + nodes, + desiredNodes, + new NodesShutdownMetadata( + Map.of( + WARM_NODE.getId(), + randomShutdownMetadataRemovingNode(WARM_NODE.getId()), + COLD_NODE.getId(), + randomShutdownMetadataRemovingNode(COLD_NODE.getId()), + HOT_NODE.getId(), + randomShutdownMetadataRemovingNode(HOT_NODE.getId()) + ) + ) + ), + equalTo(Optional.empty()) + ); + + } + + private SingleNodeShutdownMetadata randomShutdownMetadataRemovingNode(String nodeId) { + SingleNodeShutdownMetadata.Type type = randomFrom( + SingleNodeShutdownMetadata.Type.SIGTERM, + SingleNodeShutdownMetadata.Type.REPLACE, + SingleNodeShutdownMetadata.Type.REMOVE + ); + return switch (type) { + case REMOVE -> SingleNodeShutdownMetadata.builder() + .setNodeId(nodeId) + .setType(type) + .setReason(this.getTestName()) + .setStartedAtMillis(randomNonNegativeLong()) + .build(); + case REPLACE -> SingleNodeShutdownMetadata.builder() + .setNodeId(nodeId) + .setType(type) + .setTargetNodeName(randomAlphaOfLength(10)) + .setReason(this.getTestName()) + .setStartedAtMillis(randomNonNegativeLong()) + .build(); + case SIGTERM -> SingleNodeShutdownMetadata.builder() + .setNodeId(nodeId) + .setType(type) + .setGracePeriod(TimeValue.parseTimeValue(randomTimeValue(), this.getTestName())) + .setReason(this.getTestName()) + .setStartedAtMillis(randomNonNegativeLong()) + .build(); + case RESTART -> throw new AssertionError("bad randomization, this method only generates removal type shutdowns"); + }; + }
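The behaviour these hunks pin down is that a tier only counts as "available" if it still has at least one node that is not permanently leaving: REMOVE, REPLACE, and SIGTERM shutdowns all take a node out of consideration, while a RESTART shutdown (exercised by `randomRestartInCluster` below) leaves it in. A minimal sketch of that availability check, assuming a plain map of shutdown markers; the helper name and signature here are hypothetical, not the decider's actual code:

```java
import java.util.Map;

import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata;
import org.elasticsearch.cluster.node.DiscoveryNode;

// Hypothetical helper: a node still provides capacity to its tier unless it
// carries a removal-type shutdown marker (REMOVE, REPLACE, or SIGTERM).
static boolean providesTierCapacity(DiscoveryNode node, Map<String, SingleNodeShutdownMetadata> shutdowns) {
    SingleNodeShutdownMetadata shutdown = shutdowns.get(node.getId());
    // No marker, or a RESTART marker, means the node is expected to stay or come back.
    return shutdown == null || shutdown.getType() == SingleNodeShutdownMetadata.Type.RESTART;
}
```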
+ public void testFrozenIllegalForRegularIndices() { List<String> tierList = new ArrayList<>(randomSubsetOf(DataTier.ALL_DATA_TIERS)); if (tierList.contains(DATA_FROZEN) == false) { @@ -660,4 +947,78 @@ private DesiredNodeWithStatus actualizedDesiredNode(DesiredNode desiredNode) { private DesiredNodeWithStatus pendingDesiredNode(DesiredNode desiredNode) { return new DesiredNodeWithStatus(desiredNode, DesiredNodeWithStatus.Status.PENDING); } + + /** + * Creates node shutdown metadata that should not impact the decider, either because it is empty or because it is irrelevant to the + * decider logic. + */ + private NodesShutdownMetadata irrelevantNodesShutdownMetadata(DiscoveryNodes currentNodes) { + final Set<String> currentNodeIds = currentNodes.stream().map(DiscoveryNode::getId).collect(Collectors.toSet()); + int kind = currentNodes.size() == 0 ? randomFrom(1, 2) : randomFrom(1, 2, 3); + return switch (kind) { + case 1 -> new NodesShutdownMetadata(Collections.emptyMap()); + case 2 -> randomRemovalNotInCluster(currentNodeIds); + case 3 -> randomRestartInCluster(currentNodeIds); + default -> throw new AssertionError("not all randomization branches covered in test"); + }; + } + + /** + * Desired Nodes take precedence over node shutdown when they are in use, so this method generates node shutdown metadata that + * intersects with the current or desired nodes. Whatever node shutdown metadata this method generates, it should not affect the + * decider's output; if it does, there is a bug. + */ + private NodesShutdownMetadata nodesShutdownMetadataForDesiredNodesTests(DesiredNodes desiredNodes, DiscoveryNodes currentNodes) { + // Note that the desired node's External ID is not the same as the final node ID, but mix them in anyway + Set<String> nodeIds = Stream.concat( + desiredNodes != null ? desiredNodes.nodes().stream().map(DesiredNodeWithStatus::externalId) : Stream.empty(), + currentNodes.stream().map(DiscoveryNode::getId) + ).collect(Collectors.toSet()); + if (nodeIds.isEmpty()) { + return new NodesShutdownMetadata(Collections.emptyMap()); + } + int kind = randomFrom(1, 2); + return switch (kind) { + case 1 -> new NodesShutdownMetadata(Collections.emptyMap()); + case 2 -> randomRemovalInCluster(nodeIds); + default -> throw new AssertionError("not all randomization branches covered in test"); + }; + } + + private Set<String> irrelevantNodeIds(DiscoveryNodes currentNodes) { + Set<String> nodeIds = new HashSet<>(); + int numIds = randomIntBetween(0, 10); + for (int i = 0; i < numIds; i++) { + nodeIds.add( + randomValueOtherThanMany((val) -> currentNodes.nodeExists(val) || nodeIds.contains(val), () -> randomAlphaOfLength(10)) + ); + } + return nodeIds; + } + + private NodesShutdownMetadata randomRemovalNotInCluster(Set<String> currentNodes) { + String nodeId = randomValueOtherThanMany(currentNodes::contains, () -> randomAlphaOfLength(10)); + return new NodesShutdownMetadata(Map.of(nodeId, randomShutdownMetadataRemovingNode(nodeId))); + } + + private NodesShutdownMetadata randomRemovalInCluster(Set<String> currentNodes) { + String nodeId = randomFrom(currentNodes); + return new NodesShutdownMetadata(Map.of(nodeId, randomShutdownMetadataRemovingNode(nodeId))); + } + + private NodesShutdownMetadata randomRestartInCluster(Set<String> currentNodes) { + String nodeId = randomFrom(currentNodes); + return new NodesShutdownMetadata( + Map.of( + nodeId, + SingleNodeShutdownMetadata.builder() + .setNodeId(nodeId) + .setType(SingleNodeShutdownMetadata.Type.RESTART) + .setReason(this.getTestName()) + .setStartedAtMillis(randomNonNegativeLong()) + .build() + ) + ); + } + } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java index f26152d66045d..c64cb7e546861 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ClientHelperTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction; import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest; @@ -389,6 +390,7 @@ public void testGetPersistableSafeSecurityHeaders() throws IOException { final ClusterState clusterState = mock(ClusterState.class); final DiscoveryNodes discoveryNodes = mock(DiscoveryNodes.class); when(clusterState.nodes()).thenReturn(discoveryNodes); + when(clusterState.getMinTransportVersion()).thenReturn(TransportVersions.MINIMUM_COMPATIBLE); // No security header ThreadContext threadContext = new ThreadContext(Settings.EMPTY); final String nonSecurityHeaderKey = "not-a-security-header"; @@ -444,7 +446,7 @@ public void testGetPersistableSafeSecurityHeaders() throws IOException { // Rewritten for older version final TransportVersion previousVersion = TransportVersionUtils.randomVersionBetween( random(), -
TransportVersion.MINIMUM_COMPATIBLE, + TransportVersions.MINIMUM_COMPATIBLE, TransportVersionUtils.getPreviousVersion() ); when(clusterState.getMinTransportVersion()).thenReturn(previousVersion); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadataTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadataTests.java index 7c57a9cf04784..868bbc03b652c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadataTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadataTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ilm; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractChunkedSerializingTestCase; @@ -54,7 +55,7 @@ protected Metadata.Custom mutateInstance(Metadata.Custom instance) { public void testMinimumSupportedVersion() { TransportVersion min = createTestInstance().getMinimalSupportedVersion(); assertTrue( - min.onOrBefore(TransportVersionUtils.randomVersionBetween(random(), TransportVersion.V_8_7_0, TransportVersion.current())) + min.onOrBefore(TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_8_7_0, TransportVersion.current())) ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MockAction.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MockAction.java index 571eee9270161..e849512aa8f73 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MockAction.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MockAction.java @@ -45,7 +45,7 @@ public MockAction(List steps, boolean safe) { } public MockAction(StreamInput in) throws IOException { - this.steps = in.readList(MockStep::new); + this.steps = in.readCollectionAsList(MockStep::new); this.safe = in.readBoolean(); } @@ -77,7 +77,7 @@ public List toSteps(Client client, String phase, Step.StepKey nextStepKey) @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(steps.stream().map(MockStep::new).collect(Collectors.toList())); + out.writeCollection(steps.stream().map(MockStep::new).collect(Collectors.toList())); out.writeBoolean(safe); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/RolloverActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/RolloverActionTests.java index 9ec5160ce5b22..cb13e694f9c24 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/RolloverActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/RolloverActionTests.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.ilm; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.admin.indices.rollover.RolloverConditions; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.unit.ByteSizeValue; @@ -150,12 +150,12 @@ public void testToSteps() { public void testBwcSerializationWithMaxPrimaryShardDocs() throws Exception { // In case of serializing to node with older version, replace maxPrimaryShardDocs with maxDocs. 
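The comment above is the BWC contract the test that follows verifies: a RolloverAction serialized to a node on an older transport version drops maxPrimaryShardDocs unless maxDocs can stand in for it. These tests exercise that through a copyInstance(...) helper that pins both ends of the stream to the old version. A sketch of that round-trip idiom, assuming only a Writeable type T; it mirrors the BytesStreamOutput/StreamInput pattern used verbatim in other hunks of this diff, not the exact helper implementation:

```java
import java.io.IOException;

import org.elasticsearch.TransportVersion;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.Writeable;

// Version-pinned round-trip: write and read with the stream locked to the
// older transport version, so the result is what a node on that version
// would actually receive.
static <T extends Writeable> T roundTrip(T original, Writeable.Reader<T> reader, TransportVersion version) throws IOException {
    try (BytesStreamOutput out = new BytesStreamOutput()) {
        out.setTransportVersion(version);
        original.writeTo(out);
        try (StreamInput in = out.bytes().streamInput()) {
            in.setTransportVersion(version);
            return reader.read(in);
        }
    }
}
```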
RolloverAction instance = new RolloverAction(null, null, null, null, 1L, null, null, null, null, null); - RolloverAction deserializedInstance = copyInstance(instance, TransportVersion.V_8_1_0); + RolloverAction deserializedInstance = copyInstance(instance, TransportVersions.V_8_1_0); assertThat(deserializedInstance.getConditions().getMaxPrimaryShardDocs(), nullValue()); // But not if maxDocs is also specified: instance = new RolloverAction(null, null, null, 2L, 1L, null, null, null, null, null); - deserializedInstance = copyInstance(instance, TransportVersion.V_8_1_0); + deserializedInstance = copyInstance(instance, TransportVersions.V_8_1_0); assertThat(deserializedInstance.getConditions().getMaxPrimaryShardDocs(), nullValue()); assertThat(deserializedInstance.getConditions().getMaxDocs(), equalTo(instance.getConditions().getMaxDocs())); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java index 42b1bb8c1660f..5b5754af78018 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/indexing/AsyncTwoPhaseIndexerTests.java @@ -33,6 +33,7 @@ import java.util.Collections; import java.util.List; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; @@ -397,9 +398,8 @@ private static class MockThreadPool extends TestThreadPool { } @Override - public ScheduledCancellable schedule(Runnable command, TimeValue delay, String executor) { + public ScheduledCancellable schedule(Runnable command, TimeValue delay, Executor executor) { delays.add(delay); - return super.schedule(command, TimeValue.ZERO, executor); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/AbstractBWCWireSerializationTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/AbstractBWCWireSerializationTestCase.java index 7f8e6c72d3fff..e9a5b08f8051d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/AbstractBWCWireSerializationTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/AbstractBWCWireSerializationTestCase.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.ml; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; @@ -19,7 +20,7 @@ public abstract class AbstractBWCWireSerializationTestCase extends AbstractWireSerializingTestCase { public static List getAllBWCVersions() { - int minCompatVersion = Collections.binarySearch(ALL_VERSIONS, TransportVersion.MINIMUM_COMPATIBLE); + int minCompatVersion = Collections.binarySearch(ALL_VERSIONS, TransportVersions.MINIMUM_COMPATIBLE); return ALL_VERSIONS.subList(minCompatVersion, ALL_VERSIONS.size()); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FlushJobActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FlushJobActionRequestTests.java index be82f11b00260..a369219bd7c3c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FlushJobActionRequestTests.java 
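The MockThreadPool override a few hunks above reflects a core API change rippling through this diff: ThreadPool.schedule now takes a java.util.concurrent.Executor rather than a thread-pool name string. A sketch of what a migrated call site looks like, assuming the usual generic pool (this fragment is illustrative, not taken from the diff):

```java
import java.util.concurrent.Executor;

import org.elasticsearch.core.TimeValue;
import org.elasticsearch.threadpool.ThreadPool;

// Before: threadPool.schedule(task, delay, ThreadPool.Names.GENERIC);
// After:  pass the executor itself rather than its name.
void scheduleOnGeneric(ThreadPool threadPool, Runnable task) {
    Executor executor = threadPool.generic();
    threadPool.schedule(task, TimeValue.timeValueSeconds(1), executor);
}
```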
+++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/FlushJobActionRequestTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; import org.elasticsearch.xpack.core.ml.action.FlushJobAction.Request; @@ -52,7 +53,7 @@ protected Writeable.Reader instanceReader() { @Override protected Request mutateInstanceForVersion(Request instance, TransportVersion version) { - if (version.before(TransportVersion.V_8_500_012)) { + if (version.before(TransportVersions.V_8_500_020)) { instance.setRefreshRequired(true); } return instance; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsStatsActionResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsStatsActionResponseTests.java index 1b21dc547a8ac..7da61dec302a5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsStatsActionResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/GetTrainedModelsStatsActionResponseTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.ingest.IngestStats; import org.elasticsearch.xpack.core.action.util.QueryPage; @@ -78,7 +79,7 @@ protected Writeable.Reader instanceReader() { @Override protected Response mutateInstanceForVersion(Response instance, TransportVersion version) { - if (version.before(TransportVersion.V_8_0_0)) { + if (version.before(TransportVersions.V_8_0_0)) { return new Response( new QueryPage<>( instance.getResources() @@ -99,7 +100,7 @@ protected Response mutateInstanceForVersion(Response instance, TransportVersion RESULTS_FIELD ) ); - } else if (version.before(TransportVersion.V_8_1_0)) { + } else if (version.before(TransportVersions.V_8_1_0)) { return new Response( new QueryPage<>( instance.getResources() @@ -157,7 +158,7 @@ protected Response mutateInstanceForVersion(Response instance, TransportVersion RESULTS_FIELD ) ); - } else if (version.before(TransportVersion.V_8_2_0)) { + } else if (version.before(TransportVersions.V_8_2_0)) { return new Response( new QueryPage<>( instance.getResources() @@ -215,7 +216,7 @@ protected Response mutateInstanceForVersion(Response instance, TransportVersion RESULTS_FIELD ) ); - } else if (version.before(TransportVersion.V_8_4_0)) { + } else if (version.before(TransportVersions.V_8_4_0)) { return new Response( new QueryPage<>( instance.getResources() @@ -273,7 +274,7 @@ protected Response mutateInstanceForVersion(Response instance, TransportVersion RESULTS_FIELD ) ); - } else if (version.before(TransportVersion.V_8_5_0)) { + } else if (version.before(TransportVersions.V_8_5_0)) { return new Response( new QueryPage<>( instance.getResources() @@ -331,7 +332,7 @@ protected Response mutateInstanceForVersion(Response instance, TransportVersion RESULTS_FIELD ) ); - } else if (version.before(TransportVersion.V_8_6_0)) { + } else if (version.before(TransportVersions.V_8_6_0)) { // priority added return new Response( new QueryPage<>( @@ -390,7 +391,7 @@ protected Response mutateInstanceForVersion(Response instance, TransportVersion RESULTS_FIELD ) 
); - } else if (version.before(TransportVersion.V_8_8_0)) { + } else if (version.before(TransportVersions.V_8_8_0)) { // deployment_id added return new Response( new QueryPage<>( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/InferModelActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/InferModelActionRequestTests.java index ebd348ea567e9..2f073cb32d09a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/InferModelActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/InferModelActionRequestTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.TimeValue; @@ -179,7 +180,7 @@ protected Request mutateInstanceForVersion(Request instance, TransportVersion ve adjustedUpdate = currentUpdate; } - if (version.before(TransportVersion.V_8_3_0)) { + if (version.before(TransportVersions.V_8_3_0)) { return new Request( instance.getId(), adjustedUpdate, @@ -188,7 +189,7 @@ protected Request mutateInstanceForVersion(Request instance, TransportVersion ve TimeValue.MAX_VALUE, instance.isPreviouslyLicensed() ); - } else if (version.before(TransportVersion.V_8_7_0)) { + } else if (version.before(TransportVersions.V_8_7_0)) { return new Request( instance.getId(), adjustedUpdate, @@ -197,7 +198,7 @@ protected Request mutateInstanceForVersion(Request instance, TransportVersion ve instance.getInferenceTimeout(), instance.isPreviouslyLicensed() ); - } else if (version.before(TransportVersion.V_8_8_0)) { + } else if (version.before(TransportVersions.V_8_8_0)) { var r = new Request( instance.getId(), adjustedUpdate, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentResponseTests.java index 61bb883739e73..4db7d05b60658 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/InferTrainedModelDeploymentResponseTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -67,7 +68,7 @@ protected InferTrainedModelDeploymentAction.Response mutateInstanceForVersion( InferTrainedModelDeploymentAction.Response instance, TransportVersion version ) { - if (version.before(TransportVersion.V_8_6_1)) { + if (version.before(TransportVersions.V_8_6_1)) { return new InferTrainedModelDeploymentAction.Response(instance.getResults().subList(0, 1)); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartActionRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartActionRequestTests.java index 99cc52e180ca7..4ffa2e27fe60c 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartActionRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/action/PutTrainedModelDefinitionPartActionRequestTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.ml.action; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.stream.Writeable; @@ -71,7 +72,7 @@ protected Writeable.Reader instanceReader() { @Override protected Request mutateInstanceForVersion(Request instance, TransportVersion version) { - if (version.before(TransportVersion.V_8_500_043)) { + if (version.before(TransportVersions.V_8_500_043)) { return new Request( instance.getModelId(), instance.getDefinition(), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/AggProviderWireSerializationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/AggProviderWireSerializationTests.java index dbe852b5f6a82..6e3aad0c8bc11 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/AggProviderWireSerializationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/datafeed/AggProviderWireSerializationTests.java @@ -8,6 +8,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; @@ -73,7 +74,7 @@ public static AggProvider createRandomValidAggProvider() { @Override protected AggProvider mutateInstanceForVersion(AggProvider instance, TransportVersion version) { - if (version.before(TransportVersion.V_8_0_0)) { + if (version.before(TransportVersions.V_8_0_0)) { return new AggProvider(instance.getAggs(), instance.getParsedAggs(), instance.getParsingException(), false); } return instance; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigTests.java index 5857eca3789b2..e0318e3c63bc5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsConfigTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -116,7 +117,7 @@ protected DataFrameAnalyticsConfig mutateInstanceForVersion(DataFrameAnalyticsCo if (instance.getAnalysis() instanceof Classification) { builder.setAnalysis(ClassificationTests.mutateForVersion((Classification) instance.getAnalysis(), version)); } - if (version.before(TransportVersion.V_8_8_0)) { + if (version.before(TransportVersions.V_8_8_0)) { builder.setMeta(null); } return builder.build(); diff --git 
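The long run of mutateInstanceForVersion hunks above, and the tokenization-test hunks that follow, all encode the same rule: given a target wire version, return the instance as a node on that version would see it, with every field the older wire cannot carry stripped out, so the BWC framework can compare against a round-tripped copy. A sketch of the shape using a hypothetical MyConfig with a field introduced in 8.8.0 (MyConfig and its accessors are invented for illustration):

```java
import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions;

// Hypothetical override showing the pattern: null out fields the target
// wire version cannot carry so the expected instance matches what an
// older node would deserialize.
@Override
protected MyConfig mutateInstanceForVersion(MyConfig instance, TransportVersion version) {
    if (version.before(TransportVersions.V_8_8_0)) {
        // Pre-8.8.0 wires never carried the second (metadata) field.
        return new MyConfig(instance.getResultsField(), null);
    }
    return instance; // current wires carry everything
}
```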
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertJapaneseTokenizationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertJapaneseTokenizationTests.java index cd763fd0ffcab..79c069afbd4ab 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertJapaneseTokenizationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertJapaneseTokenizationTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase; @@ -20,7 +21,7 @@ public class BertJapaneseTokenizationTests extends AbstractBWCSerializationTestC private boolean lenient; public static BertJapaneseTokenization mutateForVersion(BertJapaneseTokenization instance, TransportVersion version) { - if (version.before(TransportVersion.V_8_2_0)) { + if (version.before(TransportVersions.V_8_2_0)) { return new BertJapaneseTokenization( instance.doLowerCase, instance.withSpecialTokens, diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertJapaneseTokenizationUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertJapaneseTokenizationUpdateTests.java index 99ec0905687c0..f8f87b2eb4144 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertJapaneseTokenizationUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertJapaneseTokenizationUpdateTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; @@ -74,7 +75,7 @@ protected BertJapaneseTokenizationUpdate mutateInstance(BertJapaneseTokenization @Override protected BertJapaneseTokenizationUpdate mutateInstanceForVersion(BertJapaneseTokenizationUpdate instance, TransportVersion version) { - if (version.before(TransportVersion.V_8_2_0)) { + if (version.before(TransportVersions.V_8_2_0)) { return new BertJapaneseTokenizationUpdate(instance.getTruncate(), null); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertTokenizationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertTokenizationTests.java index d7d62eae9b03c..a00ebec79a862 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertTokenizationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/BertTokenizationTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase; @@ -20,7 +21,7 @@ public class BertTokenizationTests 
extends AbstractBWCSerializationTestCase { public static TextEmbeddingConfig mutateForVersion(TextEmbeddingConfig instance, TransportVersion version) { - if (version.before(TransportVersion.V_8_8_0)) { + if (version.before(TransportVersions.V_8_8_0)) { return new TextEmbeddingConfig( instance.getVocabularyConfig(), InferenceConfigTestScaffolding.mutateTokenizationForVersion(instance.getTokenization(), version), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfigUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfigUpdateTests.java index 3f3e5201efdcc..06abb12bdb0a2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfigUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextEmbeddingConfigUpdateTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Tuple; import org.elasticsearch.xcontent.XContentParser; @@ -35,7 +36,7 @@ public static TextEmbeddingConfigUpdate randomUpdate() { } public static TextEmbeddingConfigUpdate mutateForVersion(TextEmbeddingConfigUpdate instance, TransportVersion version) { - if (version.before(TransportVersion.V_8_1_0)) { + if (version.before(TransportVersions.V_8_1_0)) { return new TextEmbeddingConfigUpdate(instance.getResultsField(), null); } return instance; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfigUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfigUpdateTests.java index 5d244441348e1..ba373dfd23b64 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfigUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextExpansionConfigUpdateTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Tuple; import org.elasticsearch.xcontent.XContentParser; @@ -30,7 +31,7 @@ public static TextExpansionConfigUpdate randomUpdate() { } public static TextExpansionConfigUpdate mutateForVersion(TextExpansionConfigUpdate instance, TransportVersion version) { - if (version.before(TransportVersion.V_8_1_0)) { + if (version.before(TransportVersions.V_8_1_0)) { return new TextExpansionConfigUpdate(instance.getResultsField(), null); } return instance; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdateTests.java index 23bb6e1e15452..b8b5f50d15bae 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/TextSimilarityConfigUpdateTests.java @@ -8,6 +8,7 @@ package 
org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Tuple; import org.elasticsearch.xcontent.XContentParser; @@ -40,7 +41,7 @@ public static TextSimilarityConfigUpdate randomUpdate() { } public static TextSimilarityConfigUpdate mutateForVersion(TextSimilarityConfigUpdate instance, TransportVersion version) { - if (version.before(TransportVersion.V_8_1_0)) { + if (version.before(TransportVersions.V_8_1_0)) { return new TextSimilarityConfigUpdate(instance.getText(), instance.getResultsField(), null, null); } return instance; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/XLMRobertaTokenizationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/XLMRobertaTokenizationTests.java index 80b3db42ade64..c7525b1c571a2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/XLMRobertaTokenizationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/XLMRobertaTokenizationTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xpack.core.ml.AbstractBWCSerializationTestCase; @@ -20,7 +21,7 @@ public class XLMRobertaTokenizationTests extends AbstractBWCSerializationTestCas private boolean lenient; public static XLMRobertaTokenization mutateForVersion(XLMRobertaTokenization instance, TransportVersion version) { - if (version.before(TransportVersion.V_8_2_0)) { + if (version.before(TransportVersions.V_8_2_0)) { return new XLMRobertaTokenization(instance.withSpecialTokens, instance.maxSequenceLength, instance.truncate, null); } return instance; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/XLMRobertaTokenizationUpdateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/XLMRobertaTokenizationUpdateTests.java index 8b0b3b6117646..90492cbcfde1c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/XLMRobertaTokenizationUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/XLMRobertaTokenizationUpdateTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.xpack.core.ml.AbstractBWCWireSerializationTestCase; @@ -67,7 +68,7 @@ protected XLMRobertaTokenizationUpdate mutateInstance(XLMRobertaTokenizationUpda @Override protected XLMRobertaTokenizationUpdate mutateInstanceForVersion(XLMRobertaTokenizationUpdate instance, TransportVersion version) { - if (version.before(TransportVersion.V_8_2_0)) { + if (version.before(TransportVersions.V_8_2_0)) { return new XLMRobertaTokenizationUpdate(instance.getTruncate(), null); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdateTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdateTests.java index 0fadd891cb027..09c8eed048d96 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/inference/trainedmodel/ZeroShotClassificationConfigUpdateTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.ml.inference.trainedmodel; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Tuple; import org.elasticsearch.xcontent.XContentParser; @@ -37,7 +38,7 @@ public static ZeroShotClassificationConfigUpdate mutateForVersion( ZeroShotClassificationConfigUpdate instance, TransportVersion version ) { - if (version.before(TransportVersion.V_8_1_0)) { + if (version.before(TransportVersions.V_8_1_0)) { return new ZeroShotClassificationConfigUpdate(instance.getLabels(), instance.getMultiLabel(), instance.getResultsField(), null); } return instance; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestTests.java index bac11d9f0fc53..63dd636a31c3f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/CreateApiKeyRequestTests.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.security.action.apikey; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -161,12 +161,12 @@ public void testSerialization() throws IOException { try (BytesStreamOutput out = new BytesStreamOutput()) { if (testV710Bwc) { - out.setTransportVersion(TransportVersion.V_7_9_0); // a version before 7.10 + out.setTransportVersion(TransportVersions.V_7_9_0); // a version before 7.10 } request.writeTo(out); try (StreamInput in = out.bytes().streamInput()) { if (testV710Bwc) { - in.setTransportVersion(TransportVersion.V_7_9_0); + in.setTransportVersion(TransportVersions.V_7_9_0); } final CreateApiKeyRequest serialized = new CreateApiKeyRequest(in); assertEquals(name, serialized.getName()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequestTests.java index 1001cd4863f5d..50369cadaf365 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/GetApiKeyRequestTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.security.action.apikey; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.io.stream.InputStreamStreamInput; @@ -134,11 +135,13 @@ public 
void testSerialization() throws IOException { final GetApiKeyRequest getApiKeyRequest = GetApiKeyRequest.builder().ownedByAuthenticatedUser(true).apiKeyId(apiKeyId).build(); ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); - out.setTransportVersion(randomVersionBetween(random(), TransportVersion.V_7_0_0, TransportVersion.V_7_3_0)); + out.setTransportVersion(randomVersionBetween(random(), TransportVersions.V_7_0_0, TransportVersions.V_7_3_0)); getApiKeyRequest.writeTo(out); InputStreamStreamInput inputStreamStreamInput = new InputStreamStreamInput(new ByteArrayInputStream(outBuffer.toByteArray())); - inputStreamStreamInput.setTransportVersion(randomVersionBetween(random(), TransportVersion.V_7_0_0, TransportVersion.V_7_3_0)); + inputStreamStreamInput.setTransportVersion( + randomVersionBetween(random(), TransportVersions.V_7_0_0, TransportVersions.V_7_3_0) + ); GetApiKeyRequest requestFromInputStream = new GetApiKeyRequest(inputStreamStreamInput); assertThat(requestFromInputStream.getApiKeyId(), equalTo(getApiKeyRequest.getApiKeyId())); @@ -154,11 +157,13 @@ public void testSerialization() throws IOException { .build(); ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); - out.setTransportVersion(randomVersionBetween(random(), TransportVersion.V_7_4_0, TransportVersion.V_8_4_0)); + out.setTransportVersion(randomVersionBetween(random(), TransportVersions.V_7_4_0, TransportVersions.V_8_4_0)); getApiKeyRequest.writeTo(out); InputStreamStreamInput inputStreamStreamInput = new InputStreamStreamInput(new ByteArrayInputStream(outBuffer.toByteArray())); - inputStreamStreamInput.setTransportVersion(randomVersionBetween(random(), TransportVersion.V_7_4_0, TransportVersion.V_8_4_0)); + inputStreamStreamInput.setTransportVersion( + randomVersionBetween(random(), TransportVersions.V_7_4_0, TransportVersions.V_8_4_0) + ); GetApiKeyRequest requestFromInputStream = new GetApiKeyRequest(inputStreamStreamInput); assertThat(requestFromInputStream.getApiKeyId(), equalTo(getApiKeyRequest.getApiKeyId())); @@ -178,11 +183,11 @@ public void testSerialization() throws IOException { ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); TransportVersion beforeActiveOnly = TransportVersionUtils.getPreviousVersion(API_KEY_ACTIVE_ONLY_PARAM_TRANSPORT_VERSION); - out.setTransportVersion(randomVersionBetween(random(), TransportVersion.V_8_5_0, beforeActiveOnly)); + out.setTransportVersion(randomVersionBetween(random(), TransportVersions.V_8_5_0, beforeActiveOnly)); getApiKeyRequest.writeTo(out); InputStreamStreamInput inputStreamStreamInput = new InputStreamStreamInput(new ByteArrayInputStream(outBuffer.toByteArray())); - inputStreamStreamInput.setTransportVersion(randomVersionBetween(random(), TransportVersion.V_8_5_0, beforeActiveOnly)); + inputStreamStreamInput.setTransportVersion(randomVersionBetween(random(), TransportVersions.V_8_5_0, beforeActiveOnly)); GetApiKeyRequest requestFromInputStream = new GetApiKeyRequest(inputStreamStreamInput); assertThat(requestFromInputStream.getApiKeyId(), equalTo(getApiKeyRequest.getApiKeyId())); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyRequestTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyRequestTests.java index ab324f7dbbe53..8c84f6f0e9b5a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/apikey/InvalidateApiKeyRequestTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.security.action.apikey; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.Strings; @@ -119,7 +120,7 @@ public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); out.writeOptionalString(realm); out.writeOptionalString(user); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_10_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) { if (Strings.hasText(apiKeyId)) { out.writeOptionalStringArray(new String[] { apiKeyId }); } else { @@ -161,8 +162,8 @@ public void writeTo(StreamOutput out) throws IOException { ) { TransportVersion streamVersion = randomVersionBetween( random(), - TransportVersion.V_7_4_0, - getPreviousVersion(TransportVersion.V_7_10_0) + TransportVersions.V_7_4_0, + getPreviousVersion(TransportVersions.V_7_10_0) ); Dummy d = new Dummy(inputs[caseNo]); osso.setTransportVersion(streamVersion); @@ -188,11 +189,13 @@ public void testSerialization() throws IOException { { ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); - out.setTransportVersion(randomVersionBetween(random(), TransportVersion.V_7_0_0, TransportVersion.V_7_3_0)); + out.setTransportVersion(randomVersionBetween(random(), TransportVersions.V_7_0_0, TransportVersions.V_7_3_0)); invalidateApiKeyRequest.writeTo(out); InputStreamStreamInput inputStreamStreamInput = new InputStreamStreamInput(new ByteArrayInputStream(outBuffer.toByteArray())); - inputStreamStreamInput.setTransportVersion(randomVersionBetween(random(), TransportVersion.V_7_0_0, TransportVersion.V_7_3_0)); + inputStreamStreamInput.setTransportVersion( + randomVersionBetween(random(), TransportVersions.V_7_0_0, TransportVersions.V_7_3_0) + ); InvalidateApiKeyRequest requestFromInputStream = new InvalidateApiKeyRequest(inputStreamStreamInput); assertThat(requestFromInputStream.getIds(), equalTo(invalidateApiKeyRequest.getIds())); @@ -202,11 +205,13 @@ public void testSerialization() throws IOException { { ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); - out.setTransportVersion(randomVersionBetween(random(), TransportVersion.V_7_4_0, TransportVersion.V_7_9_0)); + out.setTransportVersion(randomVersionBetween(random(), TransportVersions.V_7_4_0, TransportVersions.V_7_9_0)); invalidateApiKeyRequest.writeTo(out); InputStreamStreamInput inputStreamStreamInput = new InputStreamStreamInput(new ByteArrayInputStream(outBuffer.toByteArray())); - inputStreamStreamInput.setTransportVersion(randomVersionBetween(random(), TransportVersion.V_7_4_0, TransportVersion.V_7_9_0)); + inputStreamStreamInput.setTransportVersion( + randomVersionBetween(random(), TransportVersions.V_7_4_0, TransportVersions.V_7_9_0) + ); InvalidateApiKeyRequest requestFromInputStream = new InvalidateApiKeyRequest(inputStreamStreamInput); 
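The Dummy.writeTo hunk above shows the write-side gate: streams at or after V_7_10_0 carry an optional string array of ids, while older streams can only carry a single optional id. The read side has to mirror the same version check; a sketch of that mirror, with hypothetical field names (the real InvalidateApiKeyRequest constructor differs in detail):

```java
import java.io.IOException;

import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;

// Hypothetical read-side counterpart to the version-gated writeTo above.
Dummy(StreamInput in) throws IOException {
    super(in);
    realm = in.readOptionalString();
    user = in.readOptionalString();
    if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_10_0)) {
        ids = in.readOptionalStringArray();      // multi-id wire format
    } else {
        String id = in.readOptionalString();     // legacy single-id format
        ids = id == null ? null : new String[] { id };
    }
}
```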
assertThat(requestFromInputStream, equalTo(invalidateApiKeyRequest)); @@ -214,12 +219,12 @@ public void testSerialization() throws IOException { { ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); - out.setTransportVersion(randomVersionBetween(random(), TransportVersion.V_7_10_0, TransportVersion.current())); + out.setTransportVersion(randomVersionBetween(random(), TransportVersions.V_7_10_0, TransportVersion.current())); invalidateApiKeyRequest.writeTo(out); InputStreamStreamInput inputStreamStreamInput = new InputStreamStreamInput(new ByteArrayInputStream(outBuffer.toByteArray())); inputStreamStreamInput.setTransportVersion( - randomVersionBetween(random(), TransportVersion.V_7_10_0, TransportVersion.current()) + randomVersionBetween(random(), TransportVersions.V_7_10_0, TransportVersion.current()) ); InvalidateApiKeyRequest requestFromInputStream = new InvalidateApiKeyRequest(inputStreamStreamInput); @@ -237,7 +242,7 @@ public void testSerializationWillThrowWhenMultipleIdsAndOldVersionStream() { ); ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); - out.setTransportVersion(randomVersionBetween(random(), TransportVersion.V_7_4_0, getPreviousVersion(TransportVersion.V_7_10_0))); + out.setTransportVersion(randomVersionBetween(random(), TransportVersions.V_7_4_0, getPreviousVersion(TransportVersions.V_7_10_0))); final IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> invalidateApiKeyRequest.writeTo(out)); assertThat(e.getMessage(), containsString("a request with multi-valued field [ids] cannot be sent to an older version")); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java index 1acdb3b161afc..d864a89581a18 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/role/PutRoleRequestTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.security.action.role; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.Strings; @@ -173,7 +174,7 @@ public void testSerialization() throws IOException { logger.info("Serializing with version {}", version); out.setTransportVersion(version); } - final boolean mayIncludeRemoteIndices = out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0); + final boolean mayIncludeRemoteIndices = out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0); final PutRoleRequest original = buildRandomRequest(mayIncludeRemoteIndices); original.writeTo(out); @@ -190,11 +191,11 @@ public void testSerialization() throws IOException { public void testSerializationWithRemoteIndicesThrowsOnUnsupportedVersions() throws IOException { final BytesStreamOutput out = new BytesStreamOutput(); final TransportVersion versionBeforeAdvancedRemoteClusterSecurity = TransportVersionUtils.getPreviousVersion( - TransportVersion.V_8_8_0 + TransportVersions.V_8_8_0 ); final TransportVersion version = TransportVersionUtils.randomVersionBetween( random(), - 
TransportVersion.V_7_17_0, + TransportVersions.V_7_17_0, versionBeforeAdvancedRemoteClusterSecurity ); out.setTransportVersion(version); @@ -206,7 +207,7 @@ public void testSerializationWithRemoteIndicesThrowsOnUnsupportedVersions() thro ex.getMessage(), containsString( "versions of Elasticsearch before [" - + TransportVersion.V_8_8_0 + + TransportVersions.V_8_8_0 + "] can't handle remote indices privileges and attempted to send to [" + version + "]" diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponseTests.java index 3fa5044a8883e..9556d09186311 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/action/user/GetUserPrivilegesResponseTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.security.action.user; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.ByteBufferStreamInput; @@ -65,7 +66,7 @@ public void testSerialization() throws IOException { public void testSerializationForCurrentVersion() throws Exception { final TransportVersion version = TransportVersionUtils.randomCompatibleVersion(random()); - final boolean canIncludeRemoteIndices = version.onOrAfter(TransportVersion.V_8_8_0); + final boolean canIncludeRemoteIndices = version.onOrAfter(TransportVersions.V_8_8_0); final GetUserPrivilegesResponse original = randomResponse(canIncludeRemoteIndices); @@ -83,11 +84,11 @@ public void testSerializationForCurrentVersion() throws Exception { public void testSerializationWithRemoteIndicesThrowsOnUnsupportedVersions() throws IOException { final BytesStreamOutput out = new BytesStreamOutput(); final TransportVersion versionBeforeAdvancedRemoteClusterSecurity = TransportVersionUtils.getPreviousVersion( - TransportVersion.V_8_8_0 + TransportVersions.V_8_8_0 ); final TransportVersion version = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_7_17_0, + TransportVersions.V_7_17_0, versionBeforeAdvancedRemoteClusterSecurity ); out.setTransportVersion(version); @@ -99,7 +100,7 @@ public void testSerializationWithRemoteIndicesThrowsOnUnsupportedVersions() thro ex.getMessage(), containsString( "versions of Elasticsearch before [" - + TransportVersion.V_8_8_0 + + TransportVersions.V_8_8_0 + "] can't handle remote indices privileges and attempted to send to [" + version + "]" diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationSerializationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationSerializationTests.java index 6b2b3748ce36b..b098b686f6c2c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationSerializationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationSerializationTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.security.authc; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.BytesStreamOutput; import 
org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.test.ESTestCase; @@ -83,7 +84,7 @@ public void testWriteToWithCrossClusterAccessThrowsOnUnsupportedVersion() throws final BytesStreamOutput out = new BytesStreamOutput(); final TransportVersion version = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_7_17_0, + TransportVersions.V_7_17_0, TransportVersionUtils.getPreviousVersion(RemoteClusterPortSettings.TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) ); out.setTransportVersion(version); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTests.java index 7014b9f299cd9..e1a60d41ca212 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/AuthenticationTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.security.authc; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -662,10 +663,10 @@ public void testDomainSerialize() throws Exception { } try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setTransportVersion(TransportVersion.V_8_0_0); + out.setTransportVersion(TransportVersions.V_8_0_0); test.writeTo(out); StreamInput in = out.bytes().streamInput(); - in.setTransportVersion(TransportVersion.V_8_0_0); + in.setTransportVersion(TransportVersions.V_8_0_0); Authentication testBack = new Authentication(in); assertThat(testBack.getDomain(), nullValue()); assertThat(testBack.isAssignedToDomain(), is(false)); @@ -847,7 +848,7 @@ public void testMaybeRewriteForOlderVersionWithCrossClusterAccessThrowsOnUnsuppo ); final TransportVersion version = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_7_17_0, // the minimum compatible version of 8.x + TransportVersions.V_7_17_0, // the minimum compatible version of 8.x versionBeforeCrossClusterAccessRealm ); @@ -909,7 +910,7 @@ public void testMaybeRewriteMetadataForCrossClusterAccessAuthentication() throws // pick a version before that of the authentication instance to force a rewrite final TransportVersion olderVersion = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.MINIMUM_COMPATIBLE, + TransportVersions.MINIMUM_COMPATIBLE, TransportVersionUtils.getPreviousVersion(authentication.getEffectiveSubject().getTransportVersion()) ); @@ -952,7 +953,7 @@ public void testCopyWithFilteredMetadataFields() { public void testMaybeRewriteForOlderVersionErasesDomainForVersionsBeforeDomains() { final TransportVersion olderVersion = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_7_17_0, + TransportVersions.V_7_17_0, TransportVersionUtils.getPreviousVersion(Authentication.VERSION_REALM_DOMAINS) ); final Authentication authentication = AuthenticationTestHelper.builder() @@ -1171,7 +1172,7 @@ public static Authentication randomAuthentication(User user, RealmRef realmRef, realmRef = randomRealmRef(false); } // If the realm is expected to have a domain, we need a version that's at least compatible with domains - final TransportVersion minVersion = realmRef.getDomain() != null ? 
Authentication.VERSION_REALM_DOMAINS : TransportVersion.V_7_0_0; + final TransportVersion minVersion = realmRef.getDomain() != null ? Authentication.VERSION_REALM_DOMAINS : TransportVersions.V_7_0_0; final TransportVersion version = TransportVersionUtils.randomVersionBetween(random(), minVersion, TransportVersion.current()); final Map metadata; if (randomBoolean()) { @@ -1188,7 +1189,7 @@ public static Authentication randomApiKeyAuthentication(User user, String apiKey return randomApiKeyAuthentication( user, apiKeyId, - TransportVersionUtils.randomVersionBetween(random(), TransportVersion.V_7_0_0, TransportVersion.current()) + TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_7_0_0, TransportVersion.current()) ); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/SubjectTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/SubjectTests.java index 6e5bf4ffcdc5a..1c4592c331080 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/SubjectTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/SubjectTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.security.authc; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -302,7 +303,7 @@ public void testGetRoleReferencesForApiKeyBwc() { final Subject subject = new Subject( new User("joe"), new Authentication.RealmRef(API_KEY_REALM_NAME, API_KEY_REALM_TYPE, "node"), - TransportVersionUtils.randomVersionBetween(random(), TransportVersion.V_7_0_0, TransportVersion.V_7_8_1), + TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_7_0_0, TransportVersions.V_7_8_1), authMetadata ); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java index bb7d04d5aeb17..294ad7e975286 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authz/RoleDescriptorTests.java @@ -8,6 +8,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -495,7 +496,7 @@ public void testParsingFieldPermissionsUsesCache() throws IOException { public void testSerializationForCurrentVersion() throws Exception { final TransportVersion version = TransportVersionUtils.randomCompatibleVersion(random()); - final boolean canIncludeRemoteIndices = version.onOrAfter(TransportVersion.V_8_8_0); + final boolean canIncludeRemoteIndices = version.onOrAfter(TransportVersions.V_8_8_0); final boolean canIncludeWorkflows = version.onOrAfter(WORKFLOWS_RESTRICTION_VERSION); logger.info("Testing serialization with version {}", version); BytesStreamOutput output = new BytesStreamOutput(); @@ -515,10 +516,10 @@ public void testSerializationForCurrentVersion() throws Exception { } public void testSerializationWithRemoteIndicesThrowsOnUnsupportedVersions() throws IOException { - final TransportVersion 
versionBeforeRemoteIndices = TransportVersionUtils.getPreviousVersion(TransportVersion.V_8_8_0); + final TransportVersion versionBeforeRemoteIndices = TransportVersionUtils.getPreviousVersion(TransportVersions.V_8_8_0); final TransportVersion version = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_7_17_0, + TransportVersions.V_7_17_0, versionBeforeRemoteIndices ); final BytesStreamOutput output = new BytesStreamOutput(); @@ -560,7 +561,7 @@ public void testSerializationWithWorkflowsRestrictionAndUnsupportedVersions() th final TransportVersion versionBeforeWorkflowsRestriction = TransportVersionUtils.getPreviousVersion(WORKFLOWS_RESTRICTION_VERSION); final TransportVersion version = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_7_17_0, + TransportVersions.V_7_17_0, versionBeforeWorkflowsRestriction ); final BytesStreamOutput output = new BytesStreamOutput(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java index 50370c4119055..36562474d036b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/user/InternalUsersTests.java @@ -18,12 +18,15 @@ import org.elasticsearch.action.admin.indices.delete.DeleteIndexAction; import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeAction; import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction; +import org.elasticsearch.action.admin.indices.readonly.AddIndexBlockAction; import org.elasticsearch.action.admin.indices.refresh.RefreshAction; import org.elasticsearch.action.admin.indices.refresh.TransportUnpromotableShardRefreshAction; import org.elasticsearch.action.admin.indices.rollover.RolloverAction; +import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction; import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction; import org.elasticsearch.action.admin.indices.template.put.PutComponentTemplateAction; import org.elasticsearch.action.bulk.BulkAction; +import org.elasticsearch.action.downsample.DownsampleAction; import org.elasticsearch.action.get.GetAction; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexAbstraction; @@ -240,7 +243,18 @@ public void testDataStreamLifecycleUser() { RolloverAction.NAME, DeleteIndexAction.NAME, ForceMergeAction.NAME, - IndicesStatsAction.NAME + IndicesStatsAction.NAME, + UpdateSettingsAction.NAME, + DownsampleAction.NAME, + AddIndexBlockAction.NAME + ); + + final List sampleSystemDataStreamActions = List.of( + RolloverAction.NAME, + DeleteIndexAction.NAME, + ForceMergeAction.NAME, + IndicesStatsAction.NAME, + UpdateSettingsAction.NAME ); final String dataStream = randomAlphaOfLengthBetween(3, 12); checkIndexAccess(role, randomFrom(sampleIndexActions), dataStream, true); @@ -253,16 +267,16 @@ public void testDataStreamLifecycleUser() { ); allowedSystemDataStreams.forEach(allowedSystemDataStream -> { - checkIndexAccess(role, randomFrom(sampleIndexActions), allowedSystemDataStream, true); + checkIndexAccess(role, randomFrom(sampleSystemDataStreamActions), allowedSystemDataStream, true); checkIndexAccess( role, - randomFrom(sampleIndexActions), + randomFrom(sampleSystemDataStreamActions), DataStream.BACKING_INDEX_PREFIX + allowedSystemDataStream + 
randomAlphaOfLengthBetween(4, 8), true ); }); - checkIndexAccess(role, randomFrom(sampleIndexActions), randomFrom(TestRestrictedIndices.SAMPLE_RESTRICTED_NAMES), false); + checkIndexAccess(role, randomFrom(sampleSystemDataStreamActions), randomFrom(TestRestrictedIndices.SAMPLE_RESTRICTED_NAMES), false); } public void testRegularUser() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/cert/CertificateInfoTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/cert/CertificateInfoTests.java index e76f74371151c..dbf416ddbe1bc 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/cert/CertificateInfoTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ssl/cert/CertificateInfoTests.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.core.ssl.cert; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.InputStreamStreamInput; @@ -94,12 +94,12 @@ public void testMissingIssuer() throws Exception { // send from old ByteArrayOutputStream outBuffer = new ByteArrayOutputStream(); OutputStreamStreamOutput out = new OutputStreamStreamOutput(outBuffer); - out.setTransportVersion(TransportVersion.V_8_3_0); + out.setTransportVersion(TransportVersions.V_8_3_0); certInfo.writeTo(out); // receive from old ByteArrayInputStream inBuffer = new ByteArrayInputStream(outBuffer.toByteArray()); StreamInput in = new InputStreamStreamInput(inBuffer); - in.setTransportVersion(TransportVersion.V_8_3_0); + in.setTransportVersion(TransportVersions.V_8_3_0); CertificateInfo certInfoFromOld = new CertificateInfo(in); // convert to a JSON string String toXContentString = Strings.toString(certInfoFromOld.toXContent(XContentFactory.jsonBuilder(), ToXContent.EMPTY_PARAMS)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TransportTermsEnumActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TransportTermsEnumActionTests.java index f2328d2837d03..bc38a1aa007b5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TransportTermsEnumActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/termsenum/TransportTermsEnumActionTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.core.termsenum; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; @@ -69,7 +70,7 @@ public void onFailure(final Exception e) { */ public void testCCSCheckCompatibility() throws Exception { TermsEnumRequest request = new TermsEnumRequest().field("field").timeout(TimeValue.timeValueSeconds(5)); - TransportVersion version = TransportVersionUtils.getNextVersion(TransportVersion.MINIMUM_CCS_VERSION, true); + TransportVersion version = TransportVersionUtils.getNextVersion(TransportVersions.MINIMUM_CCS_VERSION, true); request.indexFilter(new DummyQueryBuilder() { @Override public TransportVersion getMinimalSupportedVersion() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/MockDeprecatedAggregationBuilder.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/MockDeprecatedAggregationBuilder.java index 
dc5e863346b52..da65edb957b94 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/MockDeprecatedAggregationBuilder.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/MockDeprecatedAggregationBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.core.transform; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.logging.DeprecationCategory; @@ -104,6 +105,6 @@ public static MockDeprecatedAggregationBuilder fromXContent(XContentParser p) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/MockDeprecatedQueryBuilder.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/MockDeprecatedQueryBuilder.java index 5f55c3bc40c91..145263beb0eb3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/MockDeprecatedQueryBuilder.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/MockDeprecatedQueryBuilder.java @@ -9,6 +9,7 @@ import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -87,6 +88,6 @@ protected int doHashCode() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformHealthIssueTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformHealthIssueTests.java index 9595ad9f35787..9a16ef41e959b 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformHealthIssueTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformHealthIssueTests.java @@ -7,7 +7,8 @@ package org.elasticsearch.xpack.core.transform.transforms; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; @@ -50,9 +51,9 @@ public void testMissingTypePre88() throws IOException { TransformHealthIssue deserializedIssue = copyInstance( originalIssue, getNamedWriteableRegistry(), - (out, value) -> value.writeTo(out), - in -> new TransformHealthIssue(in), - TransportVersion.V_8_7_0 + StreamOutput::writeWriteable, + TransformHealthIssue::new, + TransportVersions.V_8_7_0 ); assertThat(deserializedIssue.getType(), is(equalTo("unknown"))); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformStateTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformStateTests.java index b8a71359c2def..e01549032be5e 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformStateTests.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/TransformStateTests.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.transform.transforms; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.Writeable.Reader; @@ -92,10 +92,10 @@ public void testBackwardsSerialization() throws IOException { null ); try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setTransportVersion(TransportVersion.V_7_5_0); + output.setTransportVersion(TransportVersions.V_7_5_0); state.writeTo(output); try (StreamInput in = output.bytes().streamInput()) { - in.setTransportVersion(TransportVersion.V_7_5_0); + in.setTransportVersion(TransportVersions.V_7_5_0); TransformState streamedState = new TransformState(in); assertEquals(expectedState, streamedState); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSourceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSourceTests.java index 9bfe4df047c35..ff92bf7e4df91 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSourceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/transform/transforms/pivot/DateHistogramGroupSourceTests.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.core.transform.transforms.pivot; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Rounding; import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.common.io.stream.StreamInput; @@ -101,10 +101,10 @@ public void testBackwardsSerialization72() throws IOException { ); try (BytesStreamOutput output = new BytesStreamOutput()) { - output.setTransportVersion(TransportVersion.V_7_2_0); + output.setTransportVersion(TransportVersions.V_7_2_0); groupSource.writeTo(output); try (StreamInput in = output.bytes().streamInput()) { - in.setTransportVersion(TransportVersion.V_7_2_0); + in.setTransportVersion(TransportVersions.V_7_2_0); DateHistogramGroupSource streamedGroupSource = new DateHistogramGroupSource(in); assertEquals(groupSource, streamedGroupSource); } diff --git a/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json b/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json index f098e7ce6d96b..5de8ce4bef402 100644 --- a/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json +++ b/x-pack/plugin/core/template-resources/src/main/resources/fleet-agents.json @@ -59,15 +59,8 @@ } }, "components": { - "type": "nested", - "properties": { - "id": { - "type": "keyword" - }, - "status": { - "type": "keyword" - } - } + "type": "object", + "enabled": false }, "last_updated": { "type": "date" diff --git a/x-pack/plugin/deprecation/qa/rest/src/main/java/org/elasticsearch/xpack/deprecation/TestDeprecatedQueryBuilder.java b/x-pack/plugin/deprecation/qa/rest/src/main/java/org/elasticsearch/xpack/deprecation/TestDeprecatedQueryBuilder.java index a2400cab1fedb..205a699d23593 100644 --- a/x-pack/plugin/deprecation/qa/rest/src/main/java/org/elasticsearch/xpack/deprecation/TestDeprecatedQueryBuilder.java +++ 
b/x-pack/plugin/deprecation/qa/rest/src/main/java/org/elasticsearch/xpack/deprecation/TestDeprecatedQueryBuilder.java @@ -9,6 +9,7 @@ import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -91,6 +92,6 @@ protected boolean doEquals(TestDeprecatedQueryBuilder other) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.ZERO; + return TransportVersions.ZERO; } } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java index 8ec978d175f22..0351db53b6c69 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/DeprecationInfoAction.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.deprecation; import org.elasticsearch.ElasticsearchStatusException; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.action.ActionType; @@ -151,11 +151,11 @@ public static class Response extends ActionResponse implements ToXContentObject public Response(StreamInput in) throws IOException { super(in); - clusterSettingsIssues = in.readList(DeprecationIssue::new); - nodeSettingsIssues = in.readList(DeprecationIssue::new); + clusterSettingsIssues = in.readCollectionAsList(DeprecationIssue::new); + nodeSettingsIssues = in.readCollectionAsList(DeprecationIssue::new); indexSettingsIssues = in.readMapOfLists(DeprecationIssue::new); - if (in.getTransportVersion().before(TransportVersion.V_7_11_0)) { - List mlIssues = in.readList(DeprecationIssue::new); + if (in.getTransportVersion().before(TransportVersions.V_7_11_0)) { + List mlIssues = in.readCollectionAsList(DeprecationIssue::new); pluginSettingsIssues = new HashMap<>(); pluginSettingsIssues.put("ml_settings", mlIssues); } else { @@ -201,13 +201,13 @@ public Map> getPluginSettingsIssues() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(clusterSettingsIssues); - out.writeList(nodeSettingsIssues); - out.writeMapOfLists(indexSettingsIssues, StreamOutput::writeString, (o, v) -> v.writeTo(o)); - if (out.getTransportVersion().before(TransportVersion.V_7_11_0)) { - out.writeList(pluginSettingsIssues.getOrDefault("ml_settings", Collections.emptyList())); + out.writeCollection(clusterSettingsIssues); + out.writeCollection(nodeSettingsIssues); + out.writeMap(indexSettingsIssues, StreamOutput::writeCollection); + if (out.getTransportVersion().before(TransportVersions.V_7_11_0)) { + out.writeCollection(pluginSettingsIssues.getOrDefault("ml_settings", Collections.emptyList())); } else { - out.writeMapOfLists(pluginSettingsIssues, StreamOutput::writeString, (o, v) -> v.writeTo(o)); + out.writeMap(pluginSettingsIssues, StreamOutput::writeCollection); } } diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/NodesDeprecationCheckAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/NodesDeprecationCheckAction.java index 
2737464cd8398..9ee62fbca057c 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/NodesDeprecationCheckAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/NodesDeprecationCheckAction.java @@ -58,7 +58,7 @@ public static class NodeResponse extends BaseNodeResponse { public NodeResponse(StreamInput in) throws IOException { super(in); - deprecationIssues = in.readList(DeprecationIssue::new); + deprecationIssues = in.readCollectionAsList(DeprecationIssue::new); } public NodeResponse(DiscoveryNode node, List deprecationIssues) { @@ -69,7 +69,7 @@ public NodeResponse(DiscoveryNode node, List deprecationIssues @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeList(this.deprecationIssues); + out.writeCollection(this.deprecationIssues); } public List getDeprecationIssues() { diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/NodesDeprecationCheckResponse.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/NodesDeprecationCheckResponse.java index 4708ae8eb90c5..c07610484fff6 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/NodesDeprecationCheckResponse.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/NodesDeprecationCheckResponse.java @@ -33,12 +33,12 @@ public NodesDeprecationCheckResponse( @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodesDeprecationCheckAction.NodeResponse::new); + return in.readCollectionAsList(NodesDeprecationCheckAction.NodeResponse::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } @Override diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationCacheResetAction.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationCacheResetAction.java index c69e4e2903124..3636ef0a960bd 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationCacheResetAction.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationCacheResetAction.java @@ -81,12 +81,12 @@ public Response(ClusterName clusterName, List nodes, List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeResponse::new); + return in.readCollectionAsList(NodeResponse::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } @Override diff --git a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml index 5097a7fabd901..70076562b27dd 100644 --- a/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml +++ b/x-pack/plugin/downsample/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/downsample/10_basic.yml @@ -1363,3 +1363,132 @@ setup: - match: { hits.hits.2._source.k8s.pod.value.value_count: 4 } - match: { hits.hits.2._source.k8s.pod.label: "xyz" } - match: { hits.hits.2._source.k8s.pod.unmapped: "xyz" } + + +--- +"Downsample label with ignore_above": + - skip: + version: " - 8.7.99" + reason: 
"Downsample of time series index without metric allowed from version 8.8.0" + + - do: + indices.create: + index: test-label-ignore-above + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + index: + mode: time_series + routing_path: [metricset, k8s.pod.uid] + time_series: + start_time: 2021-04-28T00:00:00Z + end_time: 2021-04-29T00:00:00Z + mappings: + properties: + "@timestamp": + type: date + metricset: + type: keyword + time_series_dimension: true + k8s: + properties: + pod: + properties: + uid: + type: keyword + time_series_dimension: true + label: + type: keyword + ignore_above: 3 + gauge: + type: long + time_series_metric: gauge + - do: + bulk: + refresh: true + index: test-label-ignore-above + body: + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "label": "foo", "value": 10 }}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cat", "uid":"947e4ced-1786-4e53-9e0c-5c447e959507", "label": "foofoo", "value": 20 }}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "label": "foobar", "value": 10 }}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "dog", "uid":"df3145b3-0563-4d3b-a0f7-897eb2876ea9", "label": "foo", "value": 20 }}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "fox", "uid":"7393ef8e-489c-11ee-be56-0242ac120002", "label": "foo", "value": 10 }}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "fox", "uid":"7393ef8e-489c-11ee-be56-0242ac120002", "label": "bar", "value": 20 }}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:50:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cow", "uid":"a81ef23a-489c-11ee-be56-0242ac120005", "label": "foobar", "value": 10 }}}' + - '{"index": {}}' + - '{"@timestamp": "2021-04-28T18:51:04.467Z", "metricset": "pod", "k8s": {"pod": {"name": "cow", "uid":"a81ef23a-489c-11ee-be56-0242ac120005", "label": "barfoo", "value": 20 }}}' + + - do: + indices.put_settings: + index: test-label-ignore-above + body: + index.blocks.write: true + + - do: + indices.downsample: + index: test-label-ignore-above + target_index: test-downsample-label-ignore-above + body: > + { + "fixed_interval": "1h" + } + + - is_true: acknowledged + + - do: + search: + index: test-downsample-label-ignore-above + body: + sort: [ "_tsid", "@timestamp" ] + + - length: { hits.hits: 4 } + + - match: { hits.hits.0._source._doc_count: 2 } + - match: { hits.hits.0._source.metricset: pod } + - match: { hits.hits.0._source.k8s.pod.name: fox } + - match: { hits.hits.0._source.k8s.pod.value: 20 } + - match: { hits.hits.0._source.k8s.pod.uid: 7393ef8e-489c-11ee-be56-0242ac120002 } + - match: { hits.hits.0._source.k8s.pod.label: bar } + - match: { hits.hits.0._source.@timestamp: 2021-04-28T18:00:00.000Z } + + - match: { hits.hits.1._source._doc_count: 2 } + - match: { hits.hits.1._source.metricset: pod } + - match: { hits.hits.1._source.k8s.pod.name: cat } + - match: { hits.hits.1._source.k8s.pod.value: 20 } + - match: { hits.hits.1._source.k8s.pod.uid: 947e4ced-1786-4e53-9e0c-5c447e959507 } + # NOTE: when downsampling a label field we propagate the last (most-recent 
timestamp-wise) non-null value, + # ignoring/skipping null values. Here the last document has a value that exceeds ignore_above ("foofoo") and, + # as a result, we propagate the value of the previous document ("foo"). + - match: { hits.hits.1._source.k8s.pod.label: foo } + - match: { hits.hits.1._source.@timestamp: 2021-04-28T18:00:00.000Z } + + - match: { hits.hits.2._source._doc_count: 2 } + - match: { hits.hits.2._source.metricset: pod } + - match: { hits.hits.2._source.k8s.pod.name: cow } + - match: { hits.hits.2._source.k8s.pod.value: 20 } + - match: { hits.hits.2._source.k8s.pod.uid: a81ef23a-489c-11ee-be56-0242ac120005 } + - match: { hits.hits.2._source.k8s.pod.label: null } + - match: { hits.hits.2._source.@timestamp: 2021-04-28T18:00:00.000Z } + + - match: { hits.hits.3._source._doc_count: 2 } + - match: { hits.hits.3._source.metricset: pod } + - match: { hits.hits.3._source.k8s.pod.name: dog } + - match: { hits.hits.3._source.k8s.pod.value: 20 } + - match: { hits.hits.3._source.k8s.pod.uid: df3145b3-0563-4d3b-a0f7-897eb2876ea9 } + - match: { hits.hits.3._source.k8s.pod.label: foo } + - match: { hits.hits.3._source.@timestamp: 2021-04-28T18:00:00.000Z } + + - do: + indices.get_mapping: + index: test-downsample-label-ignore-above + + - match: { test-downsample-label-ignore-above.mappings.properties.k8s.properties.pod.properties.label.type: keyword } + - match: { test-downsample-label-ignore-above.mappings.properties.k8s.properties.pod.properties.label.ignore_above: 3 } diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java index 0c947bf22133b..faa67479cc0d5 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java @@ -107,6 +107,7 @@ public void testDownsampling() throws Exception { assertThat(writeIndex, backingIndexEqualTo(dataStreamName, 2)); // the last downsampling round must remain in the data stream assertThat(dsBackingIndices.get(0), is(tenSecondsDownsampleIndex)); + assertThat(indexExists(oneSecondDownsampleIndex), is(false)); }, 30, TimeUnit.SECONDS); } diff --git a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java index d4e0cb7c8e5b9..4f33ae0966abf 100644 --- a/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java +++ b/x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/ILMDownsampleDisruptionIT.java @@ -210,8 +210,14 @@ private void startDownsampleTaskViaIlm( var request = new UpdateSettingsRequest(sourceIndex).settings( Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, POLICY_NAME) ); + // Updating the index.lifecycle.name setting may fail due to the rolling restart itself, + // so we need to attempt it inside an assertBusy(...)
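// Background: assertBusy(...) comes from ESTestCase; it re-runs the block, retrying on AssertionError with increasing backoff until the block passes or the default 10-second timeout expires. A transient settings-update failure during the rolling restart is therefore retried rather than failing the test outright, and the indexExists guard below covers the case where downsampling finished and deleted the source index before a retry.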
assertBusy(() -> { try { + if (indexExists(sourceIndex) == false) { + logger.info("The source index [{}] no longer exists, downsampling likely completed", sourceIndex); + return; + } client().admin().indices().updateSettings(request).actionGet(TimeValue.timeValueSeconds(10)); } catch (Exception e) { logger.warn(() -> format("encountered failure while updating [%s] index's ilm policy", sourceIndex), e); diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardIndexer.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardIndexer.java index 0260fd2df5750..35af9d9cfb435 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardIndexer.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardIndexer.java @@ -157,7 +157,6 @@ public DownsampleIndexerAction.ShardDownsampleResponse execute() throws IOExcept TimeSeriesBucketCollector bucketCollector = new TimeSeriesBucketCollector(bulkProcessor); bucketCollector.preCollection(); timeSeriesSearcher.search(initialStateQuery, bucketCollector); - bucketCollector.postCollection(); } logger.info( diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java index 2962ed9e3ae3d..6a4ee88a0cdef 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardTaskParams.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.downsample; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.downsample.DownsampleConfig; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -90,7 +91,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_500_054; + return TransportVersions.V_8_500_054; } @Override diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java index 8d8865c20b047..20a3095a2a3ce 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java @@ -595,12 +595,12 @@ private static void addMetricFieldMapping(final XContentBuilder builder, final S // only one value (the last value of the counter) builder.startObject(field).field("type", fieldProperties.get("type")).field(TIME_SERIES_METRIC_PARAM, metricType).endObject(); } else { - final List supportedAggs = List.of(metricType.supportedAggs()); + final String[] supportedAggsArray = metricType.supportedAggs(); // We choose max as the default metric - final String defaultMetric = supportedAggs.contains("max") ? "max" : supportedAggs.get(0); + final String defaultMetric = List.of(supportedAggsArray).contains("max") ? 
"max" : supportedAggsArray[0]; builder.startObject(field) .field("type", AggregateDoubleMetricFieldMapper.CONTENT_TYPE) - .stringListField(AggregateDoubleMetricFieldMapper.Names.METRICS, supportedAggs) + .array(AggregateDoubleMetricFieldMapper.Names.METRICS, supportedAggsArray) .field(AggregateDoubleMetricFieldMapper.Names.DEFAULT_METRIC, defaultMetric) .field(TIME_SERIES_METRIC_PARAM, metricType) .endObject(); @@ -680,17 +680,19 @@ static IndexMetadata.Builder copyIndexMetadata( } /* - * Add the source index name and UUID to the downsample index metadata. - * If the source index is a downsample index, we will add the name and UUID + * Add the origin index name and UUID to the downsample index metadata. + * If the origin index is a downsample index, we will add the name and UUID * of the first index that we initially rolled up. */ - if (IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_UUID.exists(sourceIndexMetadata.getSettings()) == false - || IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.exists(sourceIndexMetadata.getSettings()) == false) { - Index sourceIndex = sourceIndexMetadata.getIndex(); - targetSettings.put(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.getKey(), sourceIndex.getName()) - .put(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_UUID.getKey(), sourceIndex.getUUID()); + Index sourceIndex = sourceIndexMetadata.getIndex(); + if (IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_UUID.exists(sourceIndexMetadata.getSettings()) == false + || IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_NAME.exists(sourceIndexMetadata.getSettings()) == false) { + targetSettings.put(IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_NAME.getKey(), sourceIndex.getName()) + .put(IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_UUID.getKey(), sourceIndex.getUUID()); } + targetSettings.put(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME_KEY, sourceIndex.getName()); + targetSettings.put(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_UUID_KEY, sourceIndex.getUUID()); return IndexMetadata.builder(downsampleIndexMetadata).settings(targetSettings); } diff --git a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java index 1831cdadc2cbe..cf598f9aa3e87 100644 --- a/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java +++ b/x-pack/plugin/downsample/src/test/java/org/elasticsearch/xpack/downsample/DownsampleActionSingleNodeTests.java @@ -33,6 +33,7 @@ import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Template; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.network.NetworkAddress; @@ -339,7 +340,7 @@ public void testDownsampleOfDownsample() throws IOException { String downsampleIndex2 = downsampleIndex + "-2"; DownsampleConfig config2 = new DownsampleConfig(DateHistogramInterval.minutes(intervalMinutes * randomIntBetween(2, 50))); downsample(downsampleIndex, downsampleIndex2, config2); - assertDownsampleIndex(sourceIndex, downsampleIndex2, config2); + assertDownsampleIndex(downsampleIndex, downsampleIndex2, config2); } private Date randomDate() { @@ -1278,6 +1279,18 @@ private void assertDownsampleIndexSettings(String sourceIndex, String downsample assertEquals(sourceIndex, 
downsampleSettings.get(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME_KEY)); assertEquals(sourceSettings.get(IndexSettings.MODE.getKey()), downsampleSettings.get(IndexSettings.MODE.getKey())); + if (Strings.hasText(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.get(sourceSettings))) { + // if the source is a downsample index itself, we're in the "downsample of downsample" test case and both indices should have + // the same ORIGIN configured + assertEquals( + IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_NAME.get(sourceSettings), + IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_NAME.get(downsampleSettings) + ); + assertEquals( + IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_UUID.get(sourceSettings), + IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_UUID.get(downsampleSettings) + ); + } assertNotNull(sourceSettings.get(IndexSettings.TIME_SERIES_START_TIME.getKey())); assertNotNull(downsampleSettings.get(IndexSettings.TIME_SERIES_START_TIME.getKey())); assertEquals( diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java index 9630dc0547d85..60cc7c847b5e3 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java @@ -209,6 +209,7 @@ public Collection createComponents( EnrichPolicyExecutor enrichPolicyExecutor = new EnrichPolicyExecutor( settings, clusterService, + indicesService, client, threadPool, expressionResolver, diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutor.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutor.java index 011635783a2db..ecb03615307f9 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutor.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutor.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; @@ -41,6 +42,7 @@ public class EnrichPolicyExecutor { public static final String TASK_ACTION = "policy_execution"; private final ClusterService clusterService; + private final IndicesService indicesService; private final Client client; private final ThreadPool threadPool; private final IndexNameExpressionResolver indexNameExpressionResolver; @@ -54,6 +56,7 @@ public class EnrichPolicyExecutor { public EnrichPolicyExecutor( Settings settings, ClusterService clusterService, + IndicesService indicesService, Client client, ThreadPool threadPool, IndexNameExpressionResolver indexNameExpressionResolver, @@ -61,6 +64,7 @@ public EnrichPolicyExecutor( LongSupplier nowSupplier ) { this.clusterService = clusterService; + this.indicesService = indicesService; this.client = client; this.threadPool = threadPool; this.indexNameExpressionResolver = indexNameExpressionResolver; @@ -215,6 +219,7 @@ private Runnable createPolicyRunner( task, listener, clusterService, + indicesService, client, indexNameExpressionResolver, enrichIndexName, diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceService.java 
b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceService.java index 26dad4335f6c5..7d1c52cf8a1f6 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceService.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyMaintenanceService.java @@ -110,7 +110,7 @@ protected void scheduleNext() { if (isMaster) { try { TimeValue waitTime = EnrichPlugin.ENRICH_CLEANUP_PERIOD.get(settings); - cancellable = threadPool.schedule(this::execute, waitTime, ThreadPool.Names.GENERIC); + cancellable = threadPool.schedule(this::execute, waitTime, threadPool.generic()); } catch (EsRejectedExecutionException e) { if (e.isExecutorShutdown()) { logger.debug("Failed to schedule next [enrich] maintenance task; Shutting down", e); diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java index 9b452af99df8b..a3c3e65171d83 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunner.java @@ -33,19 +33,24 @@ import org.elasticsearch.client.internal.FilterClient; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.Maps; import org.elasticsearch.common.util.iterable.Iterables; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.mapper.Mapper; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.reindex.BulkByScrollResponse; import org.elasticsearch.index.reindex.ReindexRequest; import org.elasticsearch.index.reindex.ScrollableHitSource; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.tasks.TaskCancelledException; import org.elasticsearch.xcontent.XContentBuilder; @@ -84,6 +89,7 @@ public class EnrichPolicyRunner implements Runnable { private final ExecuteEnrichPolicyTask task; private final ActionListener listener; private final ClusterService clusterService; + private final IndicesService indicesService; private final Client client; private final IndexNameExpressionResolver indexNameExpressionResolver; private final String enrichIndexName; @@ -96,6 +102,7 @@ public class EnrichPolicyRunner implements Runnable { ExecuteEnrichPolicyTask task, ActionListener listener, ClusterService clusterService, + IndicesService indicesService, Client client, IndexNameExpressionResolver indexNameExpressionResolver, String enrichIndexName, @@ -107,6 +114,7 @@ public class EnrichPolicyRunner implements Runnable { this.task = Objects.requireNonNull(task); this.listener = Objects.requireNonNull(listener); this.clusterService = Objects.requireNonNull(clusterService); + this.indicesService = indicesService; this.client = wrapClient(client, policyName, 
task, clusterService); this.indexNameExpressionResolver = Objects.requireNonNull(indexNameExpressionResolver); this.enrichIndexName = enrichIndexName; @@ -342,6 +350,7 @@ static Map mappingForMatchField(EnrichPolicy policy, List> sourceMappings) { Map> fieldMappings = new HashMap<>(); Map mappingForMatchField = mappingForMatchField(policy, sourceMappings); + MapperService mapperService = createMapperServiceForValidation(indicesService, enrichIndexName); for (String enrichField : policy.getEnrichFields()) { if (enrichField.equals(policy.getMatchField())) { mappingForMatchField = new HashMap<>(mappingForMatchField); @@ -354,7 +363,9 @@ private XContentBuilder createEnrichMapping(List> sourceMapp if (typeAndFormat.format != null) { mapping.put("format", typeAndFormat.format); } - mapping.put("index", false); // disable index + if (isIndexableField(mapperService, enrichField, typeAndFormat.type, mapping)) { + mapping.put("index", false); + } fieldMappings.put(enrichField, mapping); } } @@ -397,6 +408,27 @@ private XContentBuilder createEnrichMapping(List> sourceMapp } } + private static MapperService createMapperServiceForValidation(IndicesService indicesService, String index) { + try { + final Settings idxSettings = Settings.builder() + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) + .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()) + .build(); + IndexMetadata indexMetadata = IndexMetadata.builder(index).settings(idxSettings).numberOfShards(1).numberOfReplicas(0).build(); + return indicesService.createIndexMapperServiceForValidation(indexMetadata); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private static boolean isIndexableField(MapperService mapperService, String field, String type, Map properties) { + properties = new HashMap<>(properties); + properties.put("index", false); + Mapper.TypeParser parser = mapperService.getMapperRegistry().getMapperParser(type, IndexVersion.current()); + parser.parse(field, properties, mapperService.parserContext()); + return properties.containsKey("index") == false; + } + private void prepareAndCreateEnrichIndex(List> mappings) { Settings enrichIndexSettings = Settings.builder() .put("index.number_of_shards", 1) diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichCoordinatorStatsAction.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichCoordinatorStatsAction.java index 12cf251c63f92..7b2bc64b62c98 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichCoordinatorStatsAction.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/action/EnrichCoordinatorStatsAction.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.enrich.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionType; import org.elasticsearch.action.FailedNodeException; import org.elasticsearch.action.support.ActionFilters; @@ -79,12 +79,12 @@ public static class Response extends BaseNodesResponse { @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeResponse::new); + return in.readCollectionAsList(NodeResponse::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } } @@ -101,7 +101,7 @@ public static class NodeResponse extends BaseNodeResponse { 
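// Background: version-gated wire formats must branch symmetrically on both ends. The constructor below only reads cacheStats when the sender's transport version is on or after V_7_16_0, mirroring the matching check in writeTo(...) further down; a one-sided check would leave the byte stream desynchronized between old and new nodes.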
NodeResponse(StreamInput in) throws IOException { super(in); - this.cacheStats = in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0) + this.cacheStats = in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0) ? new EnrichStatsAction.Response.CacheStats(in) : null; this.coordinatorStats = new CoordinatorStats(in); @@ -118,7 +118,7 @@ public EnrichStatsAction.Response.CacheStats getCacheStats() { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { cacheStats.writeTo(out); } coordinatorStats.writeTo(out); diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutorTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutorTests.java index 5213736904954..e7a022e841a85 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutorTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutorTests.java @@ -77,6 +77,7 @@ public void testNonConcurrentPolicyCoordination() throws InterruptedException { final EnrichPolicyExecutor testExecutor = new EnrichPolicyExecutor( Settings.EMPTY, null, + null, client, testThreadPool, TestIndexNameExpressionResolver.newInstance(testThreadPool.getThreadContext()), @@ -132,6 +133,7 @@ public void testMaximumPolicyExecutionLimit() throws InterruptedException { final EnrichPolicyExecutor testExecutor = new EnrichPolicyExecutor( testSettings, null, + null, client, testThreadPool, TestIndexNameExpressionResolver.newInstance(testThreadPool.getThreadContext()), @@ -266,6 +268,7 @@ protected void final EnrichPolicyExecutor testExecutor = new EnrichPolicyExecutor( Settings.EMPTY, null, + null, client, testThreadPool, TestIndexNameExpressionResolver.newInstance(testThreadPool.getThreadContext()), @@ -389,6 +392,7 @@ public void testRunPolicyLocallyMissingPolicy() { Settings.EMPTY, clusterService, null, + null, testThreadPool, TestIndexNameExpressionResolver.newInstance(testThreadPool.getThreadContext()), new EnrichPolicyLocks(), diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java index b86171a97ea3b..d8e582c9fb880 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyRunnerTests.java @@ -45,6 +45,7 @@ import org.elasticsearch.index.engine.Segment; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.indices.IndicesService; import org.elasticsearch.ingest.common.IngestCommonPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.reindex.ReindexPlugin; @@ -1871,6 +1872,7 @@ public void onFailure(Exception e) { task, wrappedListener, clusterService, + getInstanceFromNode(IndicesService.class), client(), resolver, createdEnrichIndex, @@ -2157,6 +2159,100 @@ public void testEnrichMappingConflictFormats() { assertThat(hit1, equalTo(Map.of("user", "u2", "date", "2023-05"))); } + public void testEnrichObjectField() { + createIndex("source-1", Settings.EMPTY, "_doc", "id", "type=keyword", "name.first", "type=keyword", "name.last", "type=keyword"); + 
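// Background: this test covers an enrich field ("name") whose leaves are name.first/name.last, so "name" itself is a plain object mapper. Object mappers accept no "index" parameter, which is what the new isIndexableField(...) guard in EnrichPolicyRunner detects, so the enrich mapping asserted below is expected to keep "name" as a bare {"type": "object"} with no "index": false.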
client().prepareIndex("source-1") + .setSource("user", "u1", "name.first", "F1", "name.last", "L1") + .setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE) + .get(); + EnrichPolicy policy = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of("source-1"), "user", List.of("name")); + String policyName = "test1"; + final long createTime = randomNonNegativeLong(); + String createdEnrichIndex = ".enrich-test1-" + createTime; + PlainActionFuture future = new PlainActionFuture<>(); + EnrichPolicyRunner enrichPolicyRunner = createPolicyRunner(policyName, policy, future, createdEnrichIndex); + enrichPolicyRunner.run(); + future.actionGet(); + + // Validate Index definition + GetIndexResponse enrichIndex = getGetIndexResponseAndCheck(createdEnrichIndex); + Map mapping = enrichIndex.getMappings().get(createdEnrichIndex).sourceAsMap(); + assertEnrichMapping(mapping, """ + { + "user": { + "type": "keyword", + "doc_values": false + }, + "name": { + "type": "object" + } + } + """); + SearchResponse searchResponse = client().search(new SearchRequest(".enrich-test1")).actionGet(); + ElasticsearchAssertions.assertHitCount(searchResponse, 1L); + Map hit0 = searchResponse.getHits().getAt(0).getSourceAsMap(); + assertThat(hit0, equalTo(Map.of("user", "u1", "name.first", "F1", "name.last", "L1"))); + } + + public void testEnrichNestedField() throws Exception { + final String sourceIndex = "source-index"; + XContentBuilder mappingBuilder = JsonXContent.contentBuilder(); + mappingBuilder.startObject() + .startObject(MapperService.SINGLE_MAPPING_NAME) + .startObject("properties") + .startObject("user") + .field("type", "keyword") + .endObject() + .startObject("nesting") + .field("type", "nested") + .startObject("properties") + .startObject("key") + .field("type", "keyword") + .endObject() + .endObject() + .endObject() + .startObject("field2") + .field("type", "integer") + .endObject() + .endObject() + .endObject() + .endObject(); + CreateIndexResponse createResponse = indicesAdmin().create(new CreateIndexRequest(sourceIndex).mapping(mappingBuilder)).actionGet(); + assertTrue(createResponse.isAcknowledged()); + + String policyName = "test1"; + List enrichFields = List.of("nesting", "field2"); + EnrichPolicy policy = new EnrichPolicy(EnrichPolicy.MATCH_TYPE, null, List.of(sourceIndex), "user", enrichFields); + + final long createTime = randomNonNegativeLong(); + String createdEnrichIndex = ".enrich-test1-" + createTime; + PlainActionFuture future = new PlainActionFuture<>(); + EnrichPolicyRunner enrichPolicyRunner = createPolicyRunner(policyName, policy, future, createdEnrichIndex); + + logger.info("Starting policy run"); + enrichPolicyRunner.run(); + future.actionGet(); + + // Validate Index definition + GetIndexResponse enrichIndex = getGetIndexResponseAndCheck(createdEnrichIndex); + Map mapping = enrichIndex.getMappings().get(createdEnrichIndex).sourceAsMap(); + assertEnrichMapping(mapping, """ + { + "user": { + "type": "keyword", + "doc_values": false + }, + "field2": { + "type": "integer", + "index": false + }, + "nesting": { + "type": "nested" + } + } + """); + } + private EnrichPolicyRunner createPolicyRunner( String policyName, EnrichPolicy policy, @@ -2220,6 +2316,7 @@ public void onFailure(Exception e) { task, wrappedListener, clusterService, + getInstanceFromNode(IndicesService.class), client, resolver, targetIndex, diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/20_search_application_put.yml 
b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/20_search_application_put.yml index d6c5dfd32df60..1a6267ce21398 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/20_search_application_put.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/20_search_application_put.yml @@ -125,6 +125,7 @@ teardown: - do: search_application.get: name: test-search-application + - match: { indices: [ "test-index1", "test-index2" ] } - match: template: script: diff --git a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/30_search_application_get.yml b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/30_search_application_get.yml index 3080e3225fcc1..a786d18959fb7 100644 --- a/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/30_search_application_get.yml +++ b/x-pack/plugin/ent-search/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/entsearch/30_search_application_get.yml @@ -64,6 +64,7 @@ teardown: name: test-search-application-1 - match: { name: "test-search-application-1" } + - match: { indices: [ "test-index1", "test-index2" ] } - match: { analytics_collection_name: "test-analytics" } - match: { template: { @@ -101,6 +102,7 @@ teardown: name: test-search-application-2 - match: { name: "test-search-application-2" } + - match: { indices: [ "test-index1", "test-index2" ] } - match: { analytics_collection_name: "test-analytics" } - match: { template: { diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/GetAnalyticsCollectionAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/GetAnalyticsCollectionAction.java index c4568da72c30b..acabe85af51b5 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/GetAnalyticsCollectionAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/GetAnalyticsCollectionAction.java @@ -109,7 +109,7 @@ public static class Response extends ActionResponse implements ToXContentObject public Response(StreamInput in) throws IOException { super(in); - this.collections = in.readList(AnalyticsCollection::new); + this.collections = in.readCollectionAsList(AnalyticsCollection::new); } public Response(List collections) { @@ -134,7 +134,7 @@ public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params par @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(collections); + out.writeCollection(collections); } public List getAnalyticsCollections() { diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/PostAnalyticsEventAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/PostAnalyticsEventAction.java index bb041de9fa9fc..b5e810e30d125 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/PostAnalyticsEventAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/analytics/action/PostAnalyticsEventAction.java @@ -94,7 +94,7 @@ public Request(StreamInput in) throws IOException { this.eventTime = in.readLong(); this.xContentType = in.readEnum(XContentType.class); this.payload = 
in.readBytesReference(); - this.headers = in.readMap(StreamInput::readStringList); + this.headers = in.readMap(StreamInput::readStringCollectionAsList); this.clientAddress = in.readOptionalString(); } @@ -187,7 +187,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeLong(eventTime); XContentHelper.writeTo(out, xContentType); out.writeBytesReference(payload); - out.writeMap(headers, StreamOutput::writeString, StreamOutput::writeStringCollection); + out.writeMap(headers, StreamOutput::writeStringCollection); out.writeOptionalString(clientAddress); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRule.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRule.java index 04de4e6feaf47..d8486bcd930ec 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRule.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRule.java @@ -111,7 +111,7 @@ public QueryRule(String id, QueryRuleType type, List criteria public QueryRule(StreamInput in) throws IOException { this.id = in.readString(); this.type = QueryRuleType.queryRuleType(in.readString()); - this.criteria = in.readList(QueryRuleCriteria::new); + this.criteria = in.readCollectionAsList(QueryRuleCriteria::new); this.actions = in.readMap(); validate(); @@ -148,7 +148,7 @@ private void validatePinnedAction(Object action) { public void writeTo(StreamOutput out) throws IOException { out.writeString(id); out.writeString(type.toString()); - out.writeList(criteria); + out.writeCollection(criteria); out.writeGenericMap(actions); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleCriteria.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleCriteria.java index ae0f4f2df8703..4bed3d0b5a352 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleCriteria.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleCriteria.java @@ -9,6 +9,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.StreamInput; @@ -36,7 +37,7 @@ public class QueryRuleCriteria implements Writeable, ToXContentObject { - public static final TransportVersion CRITERIA_METADATA_VALUES_TRANSPORT_VERSION = TransportVersion.V_8_500_046; + public static final TransportVersion CRITERIA_METADATA_VALUES_TRANSPORT_VERSION = TransportVersions.V_8_500_046; private final QueryRuleCriteriaType criteriaType; private final String criteriaMetadata; private final List criteriaValues; @@ -74,7 +75,7 @@ public QueryRuleCriteria(StreamInput in) throws IOException { this.criteriaType = in.readEnum(QueryRuleCriteriaType.class); if (in.getTransportVersion().onOrAfter(CRITERIA_METADATA_VALUES_TRANSPORT_VERSION)) { this.criteriaMetadata = in.readOptionalString(); - this.criteriaValues = in.readOptionalList(StreamInput::readGenericValue); + this.criteriaValues = in.readOptionalCollectionAsList(StreamInput::readGenericValue); } else { this.criteriaMetadata = in.readString(); this.criteriaValues = List.of(in.readGenericValue()); diff --git 
a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleset.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleset.java index 10f50c20476a6..f58d01e7afe71 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleset.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRuleset.java @@ -56,7 +56,7 @@ public QueryRuleset(String id, List rules) { public QueryRuleset(StreamInput in) throws IOException { this.id = in.readString(); - this.rules = in.readList(QueryRule::new); + this.rules = in.readCollectionAsList(QueryRule::new); } private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( @@ -133,7 +133,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(id); - out.writeList(rules); + out.writeCollection(rules); } public String id() { diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java index bedfaae57498f..0a1ff919493c3 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/QueryRulesetListItem.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.application.rules; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -27,7 +28,7 @@ public class QueryRulesetListItem implements Writeable, ToXContentObject { // TODO we need to actually bump transport version, but there's no point until main is merged. Placeholder for now. 
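A note on two mechanical migrations that recur throughout these hunks: the Writeable list helpers are renamed (readList/writeList, readOptionalList, readStringList, and readNamedWriteableList become readCollectionAsList/writeCollection, readOptionalCollectionAsList, readStringCollectionAsList, and readNamedWriteableCollectionAsList), and wire-format version constants move from TransportVersion onto the new TransportVersions holder class. Below is a minimal, self-contained sketch of how the renamed stream methods and a TransportVersions constant combine in a version-gated Writeable; the class and field names are hypothetical, while every stream and version call is one used in the surrounding hunks:

import java.io.IOException;
import java.util.List;

import org.elasticsearch.TransportVersion;
import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

// Hypothetical example for illustration; not part of this patch.
public class LabelsExample implements Writeable {

    // Streams at or after this version carry the labels field.
    private static final TransportVersion LABELS_ADDED = TransportVersions.V_8_500_046;

    private final List<String> labels;

    public LabelsExample(StreamInput in) throws IOException {
        if (in.getTransportVersion().onOrAfter(LABELS_ADDED)) {
            this.labels = in.readStringCollectionAsList(); // renamed from readStringList()
        } else {
            this.labels = List.of(); // older nodes never wrote the field
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        if (out.getTransportVersion().onOrAfter(LABELS_ADDED)) {
            out.writeStringCollection(labels); // string counterpart of writeCollection()
        }
    }
}

As used in this patch, the renamed methods are drop-in replacements; only the method names change, not the wire format.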
- public static final TransportVersion EXPANDED_RULESET_COUNT_TRANSPORT_VERSION = TransportVersion.V_8_500_052; + public static final TransportVersion EXPANDED_RULESET_COUNT_TRANSPORT_VERSION = TransportVersions.V_8_500_052; public static final ParseField RULESET_ID_FIELD = new ParseField("ruleset_id"); public static final ParseField RULE_TOTAL_COUNT_FIELD = new ParseField("rule_total_count"); diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java index 8203c6cadc7ec..0f445fe50e9d7 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/rules/RuleQueryBuilder.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.get.GetRequest; import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; @@ -72,7 +73,7 @@ public class RuleQueryBuilder extends AbstractQueryBuilder { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_500_033; + return TransportVersions.V_8_500_033; } public RuleQueryBuilder(QueryBuilder organicQuery, Map matchCriteria, String rulesetId) { @@ -84,9 +85,9 @@ public RuleQueryBuilder(StreamInput in) throws IOException { organicQuery = in.readNamedWriteable(QueryBuilder.class); matchCriteria = in.readMap(); rulesetId = in.readString(); - pinnedIds = in.readOptionalStringList(); + pinnedIds = in.readOptionalStringCollectionAsList(); pinnedIdsSupplier = null; - pinnedDocs = in.readOptionalList(Item::new); + pinnedDocs = in.readOptionalCollectionAsList(Item::new); pinnedDocsSupplier = null; } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplication.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplication.java index c6668f813ca4e..1425f18eaa6cb 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplication.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplication.java @@ -8,6 +8,8 @@ package org.elasticsearch.xpack.application.search; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput; @@ -52,7 +54,11 @@ public class SearchApplication implements Writeable, ToXContentObject { public static final String NO_TEMPLATE_STORED_WARNING = "Using default search application template which is subject to change. 
" + "We recommend storing a template to avoid breaking changes."; + public static final String NO_ALIAS_WARNING = "Alias is missing for the search application"; + private static final TransportVersion INDICES_REMOVED_TRANSPORT_VERSION = TransportVersions.V_8_500_069; private final String name; + + @Nullable private final String[] indices; private final long updatedAtMillis; private final String analyticsCollectionName; @@ -89,8 +95,17 @@ public SearchApplication( } public SearchApplication(StreamInput in) throws IOException { + this(in, null); + } + + public SearchApplication(StreamInput in, String[] indices) throws IOException { this.name = in.readString(); - this.indices = in.readStringArray(); + + if (in.getTransportVersion().onOrAfter(INDICES_REMOVED_TRANSPORT_VERSION)) { + this.indices = indices; // Uses the provided indices, as they are no longer serialized + } else { + this.indices = in.readStringArray(); // old behaviour, read it from input as it was serialized + } this.analyticsCollectionName = in.readOptionalString(); this.updatedAtMillis = in.readLong(); this.searchApplicationTemplate = in.readOptionalWriteable(SearchApplicationTemplate::new); @@ -99,7 +114,9 @@ public SearchApplication(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(name); - out.writeStringArray(indices); + if (out.getTransportVersion().before(INDICES_REMOVED_TRANSPORT_VERSION)) { + out.writeStringArray(indices); // old behaviour. New behaviour does not serialize indices, so no need to do anything else + } out.writeOptionalString(analyticsCollectionName); out.writeLong(updatedAtMillis); out.writeOptionalWriteable(searchApplicationTemplate); @@ -182,7 +199,11 @@ public static SearchApplication fromXContent(String resourceName, XContentParser @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); + builder.field(NAME_FIELD.getPreferredName(), name); + if (indices != null) { + builder.field(INDICES_FIELD.getPreferredName(), indices); + } if (analyticsCollectionName != null) { builder.field(ANALYTICS_COLLECTION_NAME_FIELD.getPreferredName(), analyticsCollectionName); } diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java index 681a2ff5028a3..08b1e4f90c419 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/SearchApplicationIndexService.java @@ -201,11 +201,20 @@ public void getSearchApplication(String resourceName, ActionListener index.getName()) + .toArray(String[]::new); + } + private static String getSearchAliasName(SearchApplication app) { return app.name(); } @@ -421,7 +430,7 @@ private static SearchApplicationListItem hitToSearchApplicationListItem(SearchHi ); } - private SearchApplication parseSearchApplicationBinaryFromSource(BytesReference source) { + private SearchApplication parseSearchApplicationBinaryFromSource(BytesReference source, String[] indices) { try (XContentParser parser = XContentHelper.createParser(XContentParserConfiguration.EMPTY, source, XContentType.JSON)) { ensureExpectedToken(parser.nextToken(), XContentParser.Token.START_OBJECT, parser); while (parser.nextToken() != 
XContentParser.Token.END_OBJECT) { @@ -442,7 +451,7 @@ public int read() { try ( StreamInput in = new NamedWriteableAwareStreamInput(new InputStreamStreamInput(encodedIn), namedWriteableRegistry) ) { - return parseSearchApplicationBinaryWithVersion(in); + return parseSearchApplicationBinaryWithVersion(in, indices); } } else { XContentParserUtils.parseFieldsValue(parser); // consume and discard unknown fields @@ -456,11 +465,11 @@ public int read() { } } - static SearchApplication parseSearchApplicationBinaryWithVersion(StreamInput in) throws IOException { + static SearchApplication parseSearchApplicationBinaryWithVersion(StreamInput in, String[] indices) throws IOException { TransportVersion version = TransportVersion.readVersion(in); assert version.onOrBefore(TransportVersion.current()) : version + " >= " + TransportVersion.current(); in.setTransportVersion(version); - return new SearchApplication(in); + return new SearchApplication(in, indices); } static void writeSearchApplicationBinaryWithVersion(SearchApplication app, OutputStream os, TransportVersion minTransportVersion) diff --git a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/TransportGetSearchApplicationAction.java b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/TransportGetSearchApplicationAction.java index cc5c8af4271cd..4c945c05ace84 100644 --- a/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/TransportGetSearchApplicationAction.java +++ b/x-pack/plugin/ent-search/src/main/java/org/elasticsearch/xpack/application/search/action/TransportGetSearchApplicationAction.java @@ -50,6 +50,9 @@ protected void doExecute( if (searchApplication.hasStoredTemplate() == false) { HeaderWarning.addWarning(SearchApplication.NO_TEMPLATE_STORED_WARNING); } + if (searchApplication.indices() == null || searchApplication.indices().length == 0) { + HeaderWarning.addWarning(SearchApplication.NO_ALIAS_WARNING); + } // Construct a new object to ensure we backfill the stored application with the default template return new GetSearchApplicationAction.Response( searchApplication.name(), diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/search/SearchApplicationTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/search/SearchApplicationTests.java index e61ed01df421d..60b88476285df 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/search/SearchApplicationTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/search/SearchApplicationTests.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.application.search; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.BytesStreamOutput; @@ -140,7 +140,7 @@ private SearchApplication assertIndexSerialization(SearchApplication testInstanc SearchApplicationIndexService.writeSearchApplicationBinaryWithVersion( testInstance, output, - TransportVersion.MINIMUM_COMPATIBLE + TransportVersions.MINIMUM_COMPATIBLE ); try ( StreamInput in = new NamedWriteableAwareStreamInput( @@ -148,7 +148,7 @@ private SearchApplication assertIndexSerialization(SearchApplication testInstanc namedWriteableRegistry ) ) { - deserializedInstance = 
SearchApplicationIndexService.parseSearchApplicationBinaryWithVersion(in); + deserializedInstance = SearchApplicationIndexService.parseSearchApplicationBinaryWithVersion(in, testInstance.indices()); } } assertNotSame(testInstance, deserializedInstance); diff --git a/x-pack/plugin/eql/qa/rest/build.gradle b/x-pack/plugin/eql/qa/rest/build.gradle index a49317ad8c4e4..c6a634ebfd549 100644 --- a/x-pack/plugin/eql/qa/rest/build.gradle +++ b/x-pack/plugin/eql/qa/rest/build.gradle @@ -1,6 +1,6 @@ -apply plugin: 'elasticsearch.legacy-java-rest-test' -apply plugin: 'elasticsearch.legacy-yaml-rest-test' -apply plugin: 'elasticsearch.legacy-yaml-rest-compat-test' +apply plugin: 'elasticsearch.internal-java-rest-test' +apply plugin: 'elasticsearch.internal-yaml-rest-test' +apply plugin: 'elasticsearch.yaml-rest-compat-test' import org.elasticsearch.gradle.internal.info.BuildParams @@ -18,14 +18,18 @@ artifacts { restXpackTests(new File(projectDir, "src/yamlRestTest/resources/rest-api-spec/test")) } +tasks.named('javaRestTest') { + usesDefaultDistribution() +} +tasks.named('yamlRestTest') { + usesDefaultDistribution() +} +tasks.named('yamlRestTestV7CompatTest') { + usesDefaultDistribution() +} + if (BuildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("javaRestTest").configure{enabled = false } tasks.named("yamlRestTest").configure{enabled = false } } -testClusters.configureEach { - testDistribution = 'DEFAULT' - setting 'xpack.license.self_generated.type', 'basic' - setting 'xpack.monitoring.collection.enabled', 'true' - setting 'xpack.security.enabled', 'false' -} diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java index e196813f32e6c..2b9e3839dfaac 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlDateNanosIT.java @@ -7,12 +7,22 @@ package org.elasticsearch.xpack.eql; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.eql.EqlDateNanosSpecTestCase; +import org.junit.ClassRule; import java.util.List; public class EqlDateNanosIT extends EqlDateNanosSpecTestCase { + @ClassRule + public static final ElasticsearchCluster cluster = EqlTestCluster.getCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + public EqlDateNanosIT(String query, String name, List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { super(query, name, eventIds, joinKeys, size, maxSamplesPerKey); } diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java index c0a3c86106d29..ebaf30c365f22 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlExtraIT.java @@ -7,12 +7,22 @@ package org.elasticsearch.xpack.eql; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.eql.EqlExtraSpecTestCase; +import org.junit.ClassRule; import java.util.List; public class EqlExtraIT extends EqlExtraSpecTestCase { + @ClassRule + public static final ElasticsearchCluster 
cluster = EqlTestCluster.getCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + public EqlExtraIT(String query, String name, List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { super(query, name, eventIds, joinKeys, size, maxSamplesPerKey); } diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlMissingEventsIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlMissingEventsIT.java index b1548355d06e6..416d1da8deafe 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlMissingEventsIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlMissingEventsIT.java @@ -7,12 +7,22 @@ package org.elasticsearch.xpack.eql; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.eql.EqlMissingEventsSpecTestCase; +import org.junit.ClassRule; import java.util.List; public class EqlMissingEventsIT extends EqlMissingEventsSpecTestCase { + @ClassRule + public static final ElasticsearchCluster cluster = EqlTestCluster.getCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + public EqlMissingEventsIT(String query, String name, List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { super(query, name, eventIds, joinKeys, size, maxSamplesPerKey); } diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlRestIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlRestIT.java index 4aa8d9332fcf5..ed019f5cb1317 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlRestIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlRestIT.java @@ -10,10 +10,20 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.eql.EqlRestTestCase; +import org.junit.ClassRule; public class EqlRestIT extends EqlRestTestCase { + @ClassRule + public static final ElasticsearchCluster cluster = EqlTestCluster.getCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + @Override protected Settings restClientSettings() { String token = basicAuthHeaderValue("admin", new SecureString("admin-password".toCharArray())); diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlRestValidationIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlRestValidationIT.java index 0bcf90a35f54e..46fd25d2f163c 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlRestValidationIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlRestValidationIT.java @@ -7,12 +7,22 @@ package org.elasticsearch.xpack.eql; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.eql.EqlRestValidationTestCase; +import org.junit.ClassRule; import java.io.IOException; public class EqlRestValidationIT extends EqlRestValidationTestCase { + @ClassRule + public static final ElasticsearchCluster cluster = EqlTestCluster.getCluster(); + + @Override + protected String 
getTestRestCluster() { + return cluster.getHttpAddresses(); + } + @Override protected String getInexistentIndexErrorMessage() { return "\"root_cause\":[{\"type\":\"verification_exception\",\"reason\":\"Found 1 problem\\nline -1:-1: Unknown index "; diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java index bd40074b26de1..acc44688edd86 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleIT.java @@ -7,12 +7,22 @@ package org.elasticsearch.xpack.eql; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.eql.EqlSampleTestCase; +import org.junit.ClassRule; import java.util.List; public class EqlSampleIT extends EqlSampleTestCase { + @ClassRule + public static final ElasticsearchCluster cluster = EqlTestCluster.getCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + public EqlSampleIT(String query, String name, List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { super(query, name, eventIds, joinKeys, size, maxSamplesPerKey); } diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java index 077f66c6dd24e..6eccece6954f6 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSampleMultipleEntriesIT.java @@ -7,12 +7,22 @@ package org.elasticsearch.xpack.eql; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.eql.EqlSampleMultipleEntriesTestCase; +import org.junit.ClassRule; import java.util.List; public class EqlSampleMultipleEntriesIT extends EqlSampleMultipleEntriesTestCase { + @ClassRule + public static final ElasticsearchCluster cluster = EqlTestCluster.getCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + public EqlSampleMultipleEntriesIT( String query, String name, diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java index df966f3bbf776..c2313595d3dbc 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSpecIT.java @@ -7,12 +7,22 @@ package org.elasticsearch.xpack.eql; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.eql.EqlSpecTestCase; +import org.junit.ClassRule; import java.util.List; public class EqlSpecIT extends EqlSpecTestCase { + @ClassRule + public static final ElasticsearchCluster cluster = EqlTestCluster.getCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + public EqlSpecIT(String query, String name, List eventIds, String[] joinKeys, Integer size, Integer maxSamplesPerKey) { super(query, name, eventIds, joinKeys, size, maxSamplesPerKey); } diff --git 
a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlStatsIT.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlStatsIT.java index a973694960330..efa3866c8df90 100644 --- a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlStatsIT.java +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlStatsIT.java @@ -7,8 +7,18 @@ package org.elasticsearch.xpack.eql; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.eql.stats.EqlUsageRestTestCase; +import org.junit.ClassRule; public class EqlStatsIT extends EqlUsageRestTestCase { + @ClassRule + public static final ElasticsearchCluster cluster = EqlTestCluster.getCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + } diff --git a/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlTestCluster.java b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlTestCluster.java new file mode 100644 index 0000000000000..1aa72288a2e8a --- /dev/null +++ b/x-pack/plugin/eql/qa/rest/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlTestCluster.java @@ -0,0 +1,25 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.eql; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; + +public class EqlTestCluster { + + public static ElasticsearchCluster getCluster() { + return ElasticsearchCluster.local() + .nodes(1) + .distribution(DistributionType.DEFAULT) + .setting("xpack.license.self_generated.type", "basic") + .setting("xpack.monitoring.collection.enabled", "true") + .setting("xpack.security.enabled", "false") + .build(); + } + +} diff --git a/x-pack/plugin/eql/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/eql/EqlClientYamlIT.java b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/eql/EqlClientYamlIT.java index 521a1176fdbbe..0e5d862d4c860 100644 --- a/x-pack/plugin/eql/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/eql/EqlClientYamlIT.java +++ b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/java/org/elasticsearch/xpack/eql/EqlClientYamlIT.java @@ -9,11 +9,27 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; +import org.junit.ClassRule; public class EqlClientYamlIT extends ESClientYamlSuiteTestCase { + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .nodes(1) + .distribution(DistributionType.DEFAULT) + .setting("xpack.license.self_generated.type", "basic") + .setting("xpack.security.enabled", "false") + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + public EqlClientYamlIT(final ClientYamlTestCandidate testCandidate) { super(testCandidate); } diff --git a/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/10_basic.yml 
b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/10_basic.yml index 31eaedef59534..dc9fd1e475cc9 100644 --- a/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/10_basic.yml +++ b/x-pack/plugin/eql/qa/rest/src/yamlRestTest/resources/rest-api-spec/test/eql/10_basic.yml @@ -454,3 +454,27 @@ setup: body: query: 'sequence with maxspan=10d [network where user == "ADMIN"] ![network where user == "SYSTEM"] [network where user == "ADMIN"]' - match: {hits.total.value: 0} + +--- +"Error message missing column - no suggestion": + + - do: + catch: "bad_request" + eql.search: + index: eql_test + body: + query: 'sequence with maxspan=10d [network where used == "ADMIN"] [network where id == 123]' + - match: { error.root_cause.0.type: "verification_exception" } + - match: { error.root_cause.0.reason: "Found 1 problem\nline 1:42: Unknown column [used]" } + +--- +"Error message missing column - did you mean functionality": + + - do: + catch: "bad_request" + eql.search: + index: eql_test + body: + query: 'sequence with maxspan=10d [network where user == "ADMIN"] ![network where used == "SYSTEM"]' + - match: { error.root_cause.0.type: "verification_exception" } + - match: { error.root_cause.0.reason: "Found 1 problem\nline 1:75: Unknown column [used], did you mean [user]?" } diff --git a/x-pack/plugin/eql/qa/security/build.gradle b/x-pack/plugin/eql/qa/security/build.gradle index bcc9d0cb4ef8b..0641c47273f0e 100644 --- a/x-pack/plugin/eql/qa/security/build.gradle +++ b/x-pack/plugin/eql/qa/security/build.gradle @@ -1,4 +1,4 @@ -apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' import org.elasticsearch.gradle.internal.info.BuildParams @@ -6,19 +6,12 @@ dependencies { javaRestTestImplementation project(path: xpackModule('eql:qa:common')) } +tasks.named('javaRestTest') { + usesDefaultDistribution() +} + if (BuildParams.inFipsJvm){ // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC tasks.named("javaRestTest").configure{enabled = false } } -testClusters.configureEach { - testDistribution = 'DEFAULT' - setting 'xpack.license.self_generated.type', 'basic' - setting 'xpack.monitoring.collection.enabled', 'true' - setting 'xpack.security.enabled', 'true' - numberOfNodes = 2 - extraConfigFile 'roles.yml', file('roles.yml') - user username: "test-admin", password: 'x-pack-test-password', role: "test-admin" - user username: "user1", password: 'x-pack-test-password', role: "user1" - user username: "user2", password: 'x-pack-test-password', role: "user2" -} diff --git a/x-pack/plugin/eql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/eql/AsyncEqlSecurityIT.java b/x-pack/plugin/eql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/eql/AsyncEqlSecurityIT.java index d40545604f723..b3a52084419ab 100644 --- a/x-pack/plugin/eql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/eql/AsyncEqlSecurityIT.java +++ b/x-pack/plugin/eql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/eql/AsyncEqlSecurityIT.java @@ -13,12 +13,14 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; import org.elasticsearch.xpack.core.XPackPlugin; import 
org.elasticsearch.xpack.core.async.AsyncExecutionId; import org.junit.Before; +import org.junit.ClassRule; import java.io.IOException; @@ -30,6 +32,14 @@ public class AsyncEqlSecurityIT extends ESRestTestCase { + @ClassRule + public static final ElasticsearchCluster cluster = EqlSecurityTestCluster.getCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + /** * All tests run as a superuser but use es-security-runas-user to become a less privileged user. */ diff --git a/x-pack/plugin/eql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlRestValidationIT.java b/x-pack/plugin/eql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlRestValidationIT.java index 406345d1144ff..8887536379c3b 100644 --- a/x-pack/plugin/eql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlRestValidationIT.java +++ b/x-pack/plugin/eql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlRestValidationIT.java @@ -8,13 +8,22 @@ package org.elasticsearch.xpack.eql; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.eql.EqlRestValidationTestCase; +import org.junit.ClassRule; import java.io.IOException; import static org.elasticsearch.xpack.eql.SecurityUtils.secureClientSettings; public class EqlRestValidationIT extends EqlRestValidationTestCase { + @ClassRule + public static final ElasticsearchCluster cluster = EqlSecurityTestCluster.getCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } @Override protected Settings restClientSettings() { diff --git a/x-pack/plugin/eql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSecurityTestCluster.java b/x-pack/plugin/eql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSecurityTestCluster.java new file mode 100644 index 0000000000000..a1a417d91aeb8 --- /dev/null +++ b/x-pack/plugin/eql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlSecurityTestCluster.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.eql; + +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; + +public class EqlSecurityTestCluster { + public static ElasticsearchCluster getCluster() { + return ElasticsearchCluster.local() + .nodes(2) + .distribution(DistributionType.DEFAULT) + .setting("xpack.license.self_generated.type", "basic") + .setting("xpack.monitoring.collection.enabled", "true") + .setting("xpack.security.enabled", "true") + .configFile("roles.yml", Resource.fromClasspath("roles.yml")) + .user("test-admin", "x-pack-test-password", "test-admin", false) + .user("user1", "x-pack-test-password", "user1", false) + .user("user2", "x-pack-test-password", "user2", false) + .build(); + } +} diff --git a/x-pack/plugin/eql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlStatsIT.java b/x-pack/plugin/eql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlStatsIT.java index 4449ff94eb8b5..5a1c7534b5b2c 100644 --- a/x-pack/plugin/eql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlStatsIT.java +++ b/x-pack/plugin/eql/qa/security/src/javaRestTest/java/org/elasticsearch/xpack/eql/EqlStatsIT.java @@ -9,7 +9,9 @@ import org.elasticsearch.client.Request; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.cluster.ElasticsearchCluster; import org.elasticsearch.test.eql.stats.EqlUsageRestTestCase; +import org.junit.ClassRule; import java.io.IOException; @@ -17,6 +19,14 @@ public class EqlStatsIT extends EqlUsageRestTestCase { + @ClassRule + public static final ElasticsearchCluster cluster = EqlSecurityTestCluster.getCluster(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + /** * All tests run as a superuser but use es-security-runas-user to become a less privileged user. 
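The EQL REST and security ITs in these hunks all move from Gradle-managed testClusters blocks to junit-managed clusters. A minimal sketch of the resulting test shape follows; the class name and the trivial test method are hypothetical, while the rule, builder calls, and override mirror EqlSecurityTestCluster and the ITs above:

import java.io.IOException;

import org.elasticsearch.client.Request;
import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.junit.ClassRule;

public class ExampleRestIT extends ESRestTestCase {

    // One cluster per test class, started and stopped by the rule.
    @ClassRule
    public static final ElasticsearchCluster cluster = ElasticsearchCluster.local()
        .nodes(1)
        .distribution(DistributionType.DEFAULT)
        .setting("xpack.license.self_generated.type", "basic")
        .setting("xpack.security.enabled", "false")
        .build();

    // Points the REST client at the rule-managed cluster instead of a
    // Gradle-provisioned one.
    @Override
    protected String getTestRestCluster() {
        return cluster.getHttpAddresses();
    }

    public void testClusterIsReachable() throws IOException {
        assertOK(client().performRequest(new Request("GET", "/")));
    }
}

This is why the build.gradle hunks above delete the testClusters blocks and add usesDefaultDistribution(): the cluster configuration now lives next to the tests themselves.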
*/ diff --git a/x-pack/plugin/eql/qa/security/roles.yml b/x-pack/plugin/eql/qa/security/src/javaRestTest/resources/roles.yml similarity index 100% rename from x-pack/plugin/eql/qa/security/roles.yml rename to x-pack/plugin/eql/qa/security/src/javaRestTest/resources/roles.yml diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java index 6edc7087f82a5..5811d328ae7dd 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchRequest.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.eql.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.IndicesRequest; @@ -115,24 +115,24 @@ public EqlSearchRequest(StreamInput in) throws IOException { size = in.readVInt(); fetchSize = in.readVInt(); query = in.readString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_15_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_15_0)) { this.ccsMinimizeRoundtrips = in.readBoolean(); } this.waitForCompletionTimeout = in.readOptionalTimeValue(); this.keepAlive = in.readOptionalTimeValue(); this.keepOnCompletion = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_17_8)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_17_8)) { resultPosition = in.readString(); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_13_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_13_0)) { if (in.readBoolean()) { - fetchFields = in.readList(FieldAndFormat::new); + fetchFields = in.readCollectionAsList(FieldAndFormat::new); } runtimeMappings = in.readMap(); } else { runtimeMappings = emptyMap(); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { maxSamplesPerKey = in.readInt(); } } @@ -451,23 +451,23 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVInt(size); out.writeVInt(fetchSize); out.writeString(query); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_15_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_15_0)) { out.writeBoolean(ccsMinimizeRoundtrips); } out.writeOptionalTimeValue(waitForCompletionTimeout); out.writeOptionalTimeValue(keepAlive); out.writeBoolean(keepOnCompletion); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_17_8)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_17_8)) { out.writeString(resultPosition); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_13_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_13_0)) { out.writeBoolean(fetchFields != null); if (fetchFields != null) { - out.writeList(fetchFields); + out.writeCollection(fetchFields); } out.writeGenericMap(runtimeMappings); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { out.writeInt(maxSamplesPerKey); } } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java 
b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java index cbabfb8381b7d..0640347a7ea91 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/action/EqlSearchResponse.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.eql.action; import org.apache.lucene.search.TotalHits; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -276,12 +276,12 @@ private Event(StreamInput in) throws IOException { index = in.readString(); id = in.readString(); source = in.readBytesReference(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_13_0) && in.readBoolean()) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_13_0) && in.readBoolean()) { fetchFields = in.readMap(DocumentField::new); } else { fetchFields = null; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_038)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_038)) { missing = in.readBoolean(); } else { missing = index.isEmpty(); @@ -298,13 +298,13 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(index); out.writeString(id); out.writeBytesReference(source); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_13_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_13_0)) { out.writeBoolean(fetchFields != null); if (fetchFields != null) { - out.writeMap(fetchFields, StreamOutput::writeString, (stream, documentField) -> documentField.writeTo(stream)); + out.writeMap(fetchFields, StreamOutput::writeWriteable); } } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_038)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_038)) { // for BWC, 8.9.1+ does not have "missing" attribute, but it considers events with an empty index "" as missing events // see https://github.com/elastic/elasticsearch/pull/98130 out.writeBoolean(missing); @@ -439,7 +439,7 @@ public Sequence(List joinKeys, List events) { @SuppressWarnings("unchecked") public Sequence(StreamInput in) throws IOException { this.joinKeys = (List) in.readGenericValue(); - this.events = in.readList(Event::readFrom); + this.events = in.readCollectionAsList(Event::readFrom); } public static Sequence fromXContent(XContentParser parser) { @@ -449,7 +449,7 @@ public static Sequence fromXContent(XContentParser parser) { @Override public void writeTo(StreamOutput out) throws IOException { out.writeGenericValue(joinKeys); - out.writeList(events); + out.writeCollection(events); } @Override @@ -521,8 +521,8 @@ public Hits(StreamInput in) throws IOException { } else { totalHits = null; } - events = in.readBoolean() ? in.readList(Event::readFrom) : null; - sequences = in.readBoolean() ? in.readList(Sequence::new) : null; + events = in.readBoolean() ? in.readCollectionAsList(Event::readFrom) : null; + sequences = in.readBoolean() ? 
in.readCollectionAsList(Sequence::new) : null; } @Override @@ -534,13 +534,13 @@ public void writeTo(StreamOutput out) throws IOException { } if (events != null) { out.writeBoolean(true); - out.writeList(events); + out.writeCollection(events); } else { out.writeBoolean(false); } if (sequences != null) { out.writeBoolean(true); - out.writeList(sequences); + out.writeCollection(sequences); } else { out.writeBoolean(false); } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/BoxedQueryRequest.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/BoxedQueryRequest.java index 63ff6cad2df7d..a8f74575f5b41 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/BoxedQueryRequest.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/BoxedQueryRequest.java @@ -61,7 +61,7 @@ public BoxedQueryRequest(QueryRequest original, String timestamp, List k timestampField = timestamp; keys = keyNames; this.optionalKeyNames = optionalKeyNames; - RuntimeUtils.addFilter(timestampRange, searchSource); + RuntimeUtils.combineFilters(searchSource, timestampRange); } @Override @@ -181,7 +181,7 @@ public BoxedQueryRequest keys(List> values) { } } - RuntimeUtils.replaceFilter(keyFilters, newFilters, searchSource); + RuntimeUtils.replaceFilter(searchSource, keyFilters, newFilters); keyFilters = newFilters; return this; } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/SampleQueryRequest.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/SampleQueryRequest.java index 7f977411d2466..4a7f521c20050 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/SampleQueryRequest.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/assembler/SampleQueryRequest.java @@ -128,7 +128,7 @@ public void multipleKeyPairs(List> values, List prev } } - RuntimeUtils.replaceFilter(multipleKeyFilters, newFilters, searchSource); + RuntimeUtils.replaceFilter(searchSource, multipleKeyFilters, newFilters); multipleKeyFilters = newFilters; } @@ -158,7 +158,7 @@ public void singleKeyPair(final List compositeKeyValues, int maxStages, } SearchSourceBuilder newSource = copySource(searchSource); - RuntimeUtils.replaceFilter(singleKeyPairFilters, newFilters, newSource); + RuntimeUtils.replaceFilter(newSource, singleKeyPairFilters, newFilters); // ask for the minimum needed to get at least N samples per key int minResultsNeeded = maxStages + maxSamplesPerKey - 1; newSource.size(minResultsNeeded) diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java index cf76022d6a566..b5ac09cc39b9e 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/PITAwareQueryClient.java @@ -106,7 +106,7 @@ private void makeRequestPITCompatible(SearchRequest request) { if (CollectionUtils.isEmpty(indices) == false) { request.indices(Strings.EMPTY_ARRAY); QueryBuilder indexQuery = indices.length == 1 ?
termQuery(GetResult._INDEX, indices[0]) : termsQuery(GetResult._INDEX, indices); - RuntimeUtils.addFilter(indexQuery, source); + RuntimeUtils.combineFilters(source, indexQuery); } } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java index 44dffb2d1dd75..aff398a523e93 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/search/RuntimeUtils.java @@ -37,6 +37,7 @@ import org.elasticsearch.xpack.ql.expression.gen.pipeline.Pipe; import org.elasticsearch.xpack.ql.expression.gen.pipeline.ReferenceInput; import org.elasticsearch.xpack.ql.index.IndexResolver; +import org.elasticsearch.xpack.ql.util.Queries; import java.util.ArrayList; import java.util.Arrays; @@ -47,6 +48,7 @@ import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.xpack.ql.execution.search.extractor.AbstractFieldHitExtractor.MultiValueSupport.FULL; +import static org.elasticsearch.xpack.ql.util.Queries.Clause.FILTER; public final class RuntimeUtils { @@ -184,62 +186,29 @@ public static List searchHits(SearchResponse response) { return Arrays.asList(response.getHits().getHits()); } - // optimized method that adds filter to existing bool queries without additional wrapping - // additionally checks whether the given query exists for safe decoration - public static SearchSourceBuilder addFilter(QueryBuilder filter, SearchSourceBuilder source) { - BoolQueryBuilder bool = null; - QueryBuilder query = source.query(); - - if (query instanceof BoolQueryBuilder boolQueryBuilder) { - bool = boolQueryBuilder; - if (filter != null && bool.filter().contains(filter) == false) { - bool.filter(filter); - } - } else { - bool = boolQuery(); - if (query != null) { - bool.filter(query); - } - if (filter != null) { - bool.filter(filter); - } - - source.query(bool); - } - return source; + /** + * optimized method that adds filter to existing bool queries without additional wrapping + * additionally checks whether the given query exists for safe decoration + */ + public static SearchSourceBuilder combineFilters(SearchSourceBuilder source, QueryBuilder filter) { + var query = Queries.combine(FILTER, Arrays.asList(source.query(), filter)); + query = query == null ? boolQuery() : query; + return source.query(query); } public static SearchSourceBuilder replaceFilter( + SearchSourceBuilder source, List oldFilters, - List newFilters, - SearchSourceBuilder source + List newFilters ) { - BoolQueryBuilder bool = null; - QueryBuilder query = source.query(); - - if (query instanceof BoolQueryBuilder boolQueryBuilder) { - bool = boolQueryBuilder; - if (oldFilters != null) { - bool.filter().removeAll(oldFilters); - } - - if (newFilters != null) { - bool.filter().addAll(newFilters); - } - } - // no bool query means no old filters - else { - bool = boolQuery(); - if (query != null) { - bool.filter(query); - } - if (newFilters != null) { - bool.filter().addAll(newFilters); - } - - source.query(bool); - } - return source; + var query = source.query(); + query = removeFilters(query, oldFilters); + query = Queries.combine( + FILTER, + org.elasticsearch.xpack.ql.util.CollectionUtils.combine(Collections.singletonList(query), newFilters) + ); + query = query == null ? 
boolQuery() : query; + return source.query(query); } public static SearchSourceBuilder wrapAsFilter(SearchSourceBuilder source) { @@ -252,4 +221,13 @@ public static SearchSourceBuilder wrapAsFilter(SearchSourceBuilder source) { source.query(bool); return source; } + + public static QueryBuilder removeFilters(QueryBuilder query, List filters) { + if (query instanceof BoolQueryBuilder boolQueryBuilder) { + if (org.elasticsearch.xpack.ql.util.CollectionUtils.isEmpty(filters) == false) { + boolQueryBuilder.filter().removeAll(filters); + } + } + return query; + } } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java index 51d05e93b9a61..04c64202fd3e4 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/execution/sequence/TumblingWindow.java @@ -48,7 +48,7 @@ import static java.util.stream.Collectors.toList; import static org.elasticsearch.action.ActionListener.runAfter; import static org.elasticsearch.xpack.eql.execution.ExecutionUtils.copySource; -import static org.elasticsearch.xpack.eql.execution.search.RuntimeUtils.addFilter; +import static org.elasticsearch.xpack.eql.execution.search.RuntimeUtils.combineFilters; import static org.elasticsearch.xpack.eql.execution.search.RuntimeUtils.searchHits; import static org.elasticsearch.xpack.eql.util.SearchHitUtils.qualifiedIndex; @@ -314,7 +314,7 @@ private List prepareQueryForMissingEvents(List toCheck) builder.sort(r.timestampField(), SortOrder.ASC); } addKeyFilter(i, sequence, builder); - RuntimeUtils.addFilter(range, builder); + RuntimeUtils.combineFilters(builder, range); result.add(RuntimeUtils.prepareRequest(builder.size(1).trackTotalHits(false), false, Strings.EMPTY_ARRAY)); } else { leading = false; @@ -331,7 +331,7 @@ private void addKeyFilter(int stage, Sequence sequence, SearchSourceBuilder buil } for (int i = 0; i < keys.size(); i++) { Attribute k = keys.get(i); - addFilter(new TermQueryBuilder(k.qualifiedName(), sequence.key().asList().get(i)), builder); + combineFilters(builder, new TermQueryBuilder(k.qualifiedName(), sequence.key().asList().get(i))); } } diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/expression/function/scalar/string/CIDRMatchFunctionProcessor.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/expression/function/scalar/string/CIDRMatchFunctionProcessor.java index 2eb9d7630effb..143513f59cdd2 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/expression/function/scalar/string/CIDRMatchFunctionProcessor.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/expression/function/scalar/string/CIDRMatchFunctionProcessor.java @@ -32,13 +32,13 @@ public CIDRMatchFunctionProcessor(Processor source, List addresses) { public CIDRMatchFunctionProcessor(StreamInput in) throws IOException { source = in.readNamedWriteable(Processor.class); - addresses = in.readNamedWriteableList(Processor.class); + addresses = in.readNamedWriteableCollectionAsList(Processor.class); } @Override public final void writeTo(StreamOutput out) throws IOException { out.writeNamedWriteable(source); - out.writeNamedWriteableList(addresses); + out.writeNamedWriteableCollection(addresses); } @Override diff --git 
a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlStatsResponse.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlStatsResponse.java index 61417c94c6ff6..b454bef8ab49f 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlStatsResponse.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/plugin/EqlStatsResponse.java @@ -33,12 +33,12 @@ public EqlStatsResponse(ClusterName clusterName, List nodes, @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeStatsResponse::readNodeResponse); + return in.readCollectionAsList(NodeStatsResponse::readNodeResponse); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } @Override diff --git a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EqlSession.java b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EqlSession.java index 0aa914083c7f2..2882d083532e8 100644 --- a/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EqlSession.java +++ b/x-pack/plugin/eql/src/main/java/org/elasticsearch/xpack/eql/session/EqlSession.java @@ -22,11 +22,16 @@ import org.elasticsearch.xpack.eql.parser.ParserParams; import org.elasticsearch.xpack.eql.plan.physical.PhysicalPlan; import org.elasticsearch.xpack.eql.planner.Planner; +import org.elasticsearch.xpack.ql.expression.UnresolvedAttribute; import org.elasticsearch.xpack.ql.expression.function.FunctionRegistry; import org.elasticsearch.xpack.ql.index.IndexResolver; import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan; +import java.util.LinkedHashSet; +import java.util.Set; + import static org.elasticsearch.xpack.ql.util.ActionListeners.map; +import static org.elasticsearch.xpack.ql.util.StringUtils.WILDCARD; public class EqlSession { @@ -116,14 +121,27 @@ private void preAnalyze(LogicalPlan parsed, ActionListener list listener.onFailure(new TaskCancelledException("cancelled")); return; } + Set fieldNames = fieldNames(parsed); indexResolver.resolveAsMergedMapping( indexWildcard, + fieldNames, configuration.indicesOptions(), configuration.runtimeMappings(), map(listener, r -> preAnalyzer.preAnalyze(parsed, r)) ); } + static Set fieldNames(LogicalPlan parsed) { + Set fieldNames = new LinkedHashSet<>(); + parsed.forEachExpressionDown(UnresolvedAttribute.class, ua -> { + fieldNames.add(ua.name()); + if (ua.name().endsWith(WILDCARD) == false) { + fieldNames.add(ua.name() + ".*"); + } + }); + return fieldNames.isEmpty() ? 
IndexResolver.ALL_FIELDS : fieldNames; + } + private LogicalPlan postAnalyze(LogicalPlan verified) { return postAnalyzer.postAnalyze(verified, configuration); } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/AbstractBWCSerializationTestCase.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/AbstractBWCSerializationTestCase.java index 541b66446e209..ec910f147c014 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/AbstractBWCSerializationTestCase.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/AbstractBWCSerializationTestCase.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.eql; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractXContentSerializingTestCase; import org.elasticsearch.xcontent.ToXContent; @@ -21,7 +22,7 @@ public abstract class AbstractBWCSerializationTestCase extends AbstractXContentSerializingTestCase { private static List getAllBWCVersions() { - int minCompatVersion = Collections.binarySearch(ALL_VERSIONS, TransportVersion.MINIMUM_COMPATIBLE); + int minCompatVersion = Collections.binarySearch(ALL_VERSIONS, TransportVersions.MINIMUM_COMPATIBLE); return ALL_VERSIONS.subList(minCompatVersion, ALL_VERSIONS.size()); } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/AbstractBWCWireSerializingTestCase.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/AbstractBWCWireSerializingTestCase.java index 887006e12206c..90d9e8467137a 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/AbstractBWCWireSerializingTestCase.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/AbstractBWCWireSerializingTestCase.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.eql; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractWireSerializingTestCase; @@ -20,7 +21,7 @@ public abstract class AbstractBWCWireSerializingTestCase extends AbstractWireSerializingTestCase { private static List getAllBWCVersions() { - int minCompatVersion = Collections.binarySearch(ALL_VERSIONS, TransportVersion.MINIMUM_COMPATIBLE); + int minCompatVersion = Collections.binarySearch(ALL_VERSIONS, TransportVersions.MINIMUM_COMPATIBLE); return ALL_VERSIONS.subList(minCompatVersion, ALL_VERSIONS.size()); } diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestTests.java index f3ecfc99162a4..e5b0a0baa8505 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchRequestTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.eql.action; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.Settings; @@ -132,14 +133,14 @@ protected EqlSearchRequest mutateInstanceForVersion(EqlSearchRequest instance, T mutatedInstance.size(instance.size()); mutatedInstance.fetchSize(instance.fetchSize()); mutatedInstance.query(instance.query()); - 
diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java index 8848958dde885..765fd94d4c6be 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/action/EqlSearchResponseTests.java @@ -8,6 +8,7 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.document.DocumentField; @@ -287,8 +288,8 @@ private List<Event> mutateEvents(List<Event> original, TransportVersion version) e.index(), e.id(), e.source(), - version.onOrAfter(TransportVersion.V_7_13_0) ? e.fetchFields() : null, - version.onOrAfter(TransportVersion.V_8_500_038) ? e.missing() : e.index().isEmpty() + version.onOrAfter(TransportVersions.V_7_13_0) ? e.fetchFields() : null, + version.onOrAfter(TransportVersions.V_8_500_038) ? e.missing() : e.index().isEmpty() ) ); } @@ -298,10 +299,10 @@ public void testEmptyIndexAsMissingEvent() throws IOException { Event event = new Event("", "", new BytesArray("{}".getBytes(StandardCharsets.UTF_8)), null); BytesStreamOutput out = new BytesStreamOutput(); - out.setTransportVersion(TransportVersion.V_8_500_020);// 8.9.1 + out.setTransportVersion(TransportVersions.V_8_500_020);// 8.9.1 event.writeTo(out); ByteArrayStreamInput in = new ByteArrayStreamInput(out.bytes().array()); - in.setTransportVersion(TransportVersion.V_8_500_020); + in.setTransportVersion(TransportVersions.V_8_500_020); Event event2 = Event.readFrom(in); assertTrue(event2.missing()); }
diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java index e9d65201aa149..2df9452137ee4 100644 --- a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/execution/sample/CircuitBreakerTests.java @@ -280,7 +280,7 @@ public String readString() throws IOException { } @Override - public List<String> readStringList() throws IOException { + public List<String> readStringCollectionAsList() throws IOException { return emptyList(); }
diff --git a/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/session/IndexResolverFieldNamesTests.java b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/session/IndexResolverFieldNamesTests.java new file mode 100644 index 0000000000000..18e2b377269b0 --- /dev/null +++ b/x-pack/plugin/eql/src/test/java/org/elasticsearch/xpack/eql/session/IndexResolverFieldNamesTests.java @@ -0,0 +1,492 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.xpack.eql.session; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.eql.parser.EqlParser; + +import java.util.Set; + +import static org.hamcrest.Matchers.equalTo; + +public class IndexResolverFieldNamesTests extends ESTestCase { + + private static final EqlParser parser = new EqlParser(); + + public void testSimpleQueryEqual() { + assertFieldNames( + """ + process where serial_event_id == 1""", + Set.of("serial_event_id.*", "serial_event_id", "event.category.*", "event.category", "@timestamp.*", "@timestamp") + ); + } + + public void testSimpleQueryHeadSix() { + assertFieldNames(""" + process where true | head 6""", Set.of("event.category.*", "event.category", "@timestamp.*", "@timestamp")); + } + + public void testProcessWhereFalse() { + assertFieldNames(""" + process where false""", Set.of("event.category.*", "event.category", "@timestamp.*", "@timestamp")); + } + + public void testProcessNameInexistent() { + assertFieldNames( + """ + process where process_name : "impossible name" or (serial_event_id < 4.5 and serial_event_id >= 3.1)""", + Set.of( + "process_name.*", + "process_name", + "serial_event_id.*", + "serial_event_id", + "event.category.*", + "event.category", + "@timestamp.*", + "@timestamp" + ) + ); + } + + public void testSerialEventIdLteAndGt() { + assertFieldNames( + """ + process where serial_event_id<=8 and serial_event_id > 7""", + Set.of("serial_event_id.*", "serial_event_id", "event.category.*", "event.category", "@timestamp.*", "@timestamp") + ); + } + + public void testMinusOneLtExitCode() { + assertFieldNames( + """ + process where -1 < exit_code""", + Set.of("exit_code.*", "exit_code", "event.category.*", "event.category", "@timestamp.*", "@timestamp") + ); + } + + public void testNotExitCodeGtWithHead1() { + assertFieldNames( + """ + process where not (exit_code > -1) + and serial_event_id in (58, 64, 69, 74, 80, 85, 90, 93, 94) + | head 10""", + Set.of( + "exit_code.*", + "exit_code", + "serial_event_id.*", + "serial_event_id", + "event.category.*", + "event.category", + "@timestamp.*", + "@timestamp" + ) + ); + } + + public void testProcessWithMultipleConditions1() { + assertFieldNames( + """ + process where (serial_event_id<=8 and serial_event_id > 7) and (opcode==3 and opcode>2)""", + Set.of( + "opcode.*", + "opcode", + "serial_event_id.*", + "serial_event_id", + "event.category.*", + "event.category", + "@timestamp.*", + "@timestamp" + ) + ); + } + + public void testWildcardAndMultipleConditions1() { + assertFieldNames( + """ + file where file_path:"x" + and opcode in (0,1,2) and user_name:\"vagrant\"""", + Set.of( + "user_name.*", + "user_name", + "opcode.*", + "opcode", + "file_path.*", + "file_path", + "event.category.*", + "event.category", + "@timestamp.*", + "@timestamp" + ) + ); + } + + public void testSequenceOneOneMatch() { + assertFieldNames( + """ + sequence + [process where serial_event_id == 1] + [process where serial_event_id == 2]""", + Set.of("serial_event_id.*", "serial_event_id", "event.category.*", "event.category", "@timestamp.*", "@timestamp") + ); + } + + public void testSequenceOneManyMany_Runs() { + assertFieldNames( + """ + sequence + [process where serial_event_id == 1] + [process where true] with runs=2""", + Set.of("serial_event_id.*", "serial_event_id", "event.category.*", "event.category", "@timestamp.*", "@timestamp") + ); + } + + public void testTwoSequencesWithKeys() { + assertFieldNames( + """ + sequence + [process where true] by unique_pid + [process where opcode 
== 1] by unique_ppid""", + Set.of( + "opcode.*", + "opcode", + "unique_ppid.*", + "unique_ppid", + "unique_pid.*", + "unique_pid", + "event.category.*", + "event.category", + "@timestamp.*", + "@timestamp" + ) + ); + } + + public void testTwoSequencesWithTwoKeys() { + assertFieldNames( + """ + sequence + [process where true] by unique_pid, process_path + [process where opcode == 1] by unique_ppid, parent_process_path""", + Set.of( + "opcode.*", + "opcode", + "unique_ppid.*", + "unique_ppid", + "unique_pid.*", + "unique_pid", + "process_path.*", + "process_path", + "parent_process_path.*", + "parent_process_path", + "event.category.*", + "event.category", + "@timestamp.*", + "@timestamp" + ) + ); + } + + public void testFourSequencesByPidWithUntil1() { + assertFieldNames( + """ + sequence + [process where opcode == 1] by unique_pid + [file where opcode == 0] by unique_pid + [file where opcode == 0] by unique_pid + [file where opcode == 0] by unique_pid + until + [file where opcode == 2] by unique_pid""", + Set.of("opcode.*", "opcode", "unique_pid.*", "unique_pid", "event.category.*", "event.category", "@timestamp.*", "@timestamp") + ); + } + + public void testSequencesOnDifferentEventTypesWithBy() { + assertFieldNames( + """ + sequence + [file where opcode==0 and file_name:"svchost.exe"] by unique_pid + [process where opcode == 1] by unique_ppid""", + Set.of( + "opcode.*", + "opcode", + "unique_ppid.*", + "unique_ppid", + "unique_pid.*", + "unique_pid", + "file_name.*", + "file_name", + "event.category.*", + "event.category", + "@timestamp.*", + "@timestamp" + ) + ); + } + + public void testMultipleConditions2() { + assertFieldNames( + """ + process where opcode == 1 + and process_name in ("net.exe", "net1.exe") + and not (parent_process_name : "net.exe" + and process_name : "net1.exe") + and command_line : "*group *admin*" and command_line != \"*x*\"""", + Set.of( + "opcode.*", + "opcode", + "process_name.*", + "process_name", + "parent_process_name.*", + "parent_process_name", + "command_line.*", + "command_line", + "event.category.*", + "event.category", + "@timestamp.*", + "@timestamp" + ) + ); + } + + public void testTwoSequencesWithKeys2() { + assertFieldNames( + """ + sequence + [file where file_name:"lsass.exe"] by file_path,process_path + [process where true] by process_path,parent_process_path""", + Set.of( + "file_name.*", + "file_name", + "file_path.*", + "file_path", + "process_path.*", + "process_path", + "parent_process_path.*", + "parent_process_path", + "event.category.*", + "event.category", + "@timestamp.*", + "@timestamp" + ) + ); + } + + public void testEndsWithAndCondition() { + assertFieldNames( + """ + file where opcode==0 and serial_event_id == 88 and startsWith~("explorer.exeaAAAA", "EXPLORER.exe")""", + Set.of( + "opcode.*", + "opcode", + "serial_event_id.*", + "serial_event_id", + "event.category.*", + "event.category", + "@timestamp.*", + "@timestamp" + ) + ); + } + + public void testStringContains2() { + assertFieldNames( + """ + file where opcode==0 and stringContains("ABCDEFGHIexplorer.exeJKLMNOP", file_name)""", + Set.of("opcode.*", "opcode", "file_name.*", "file_name", "event.category.*", "event.category", "@timestamp.*", "@timestamp") + ); + } + + public void testConcatCaseInsensitive() { + assertFieldNames( + "process where concat(serial_event_id, \":\", process_name, opcode) : \"x\"", + Set.of( + "opcode.*", + "opcode", + "process_name.*", + "process_name", + "serial_event_id.*", + "serial_event_id", + "event.category.*", + "event.category", + 
"@timestamp.*", + "@timestamp" + ) + ); + } + + public void testCidrMatch4() { + assertFieldNames( + """ + network where cidrMatch(source_address, "0.0.0.0/0")""", + Set.of("source_address.*", "source_address", "event.category.*", "event.category", "@timestamp.*", "@timestamp") + ); + } + + public void testNumberStringConversion5() { + assertFieldNames( + """ + any where number(string(serial_event_id), 16) == 17""", + Set.of("serial_event_id.*", "serial_event_id", "@timestamp.*", "@timestamp") + ); + } + + public void testSimpleRegex() { + assertFieldNames( + "process where command_line regex \".*\"", + Set.of("command_line.*", "command_line", "event.category.*", "event.category", "@timestamp.*", "@timestamp") + ); + } + + public void testSequenceWithOptionalUserDomain() { + assertFieldNames( + """ + sequence by ?user_domain [process where true] [registry where true]""", + Set.of("user_domain.*", "user_domain", "event.category.*", "event.category", "@timestamp.*", "@timestamp") + ); + } + + public void testTwoSequencesWithTwoKeys_AndOptionals() { + assertFieldNames( + """ + sequence by ?x + [process where true] by unique_pid, process_path, ?z + [process where opcode == 1] by unique_ppid, parent_process_path, ?w""", + Set.of( + "opcode.*", + "opcode", + "x.*", + "x", + "parent_process_path.*", + "parent_process_path", + "process_path.*", + "process_path", + "unique_pid.*", + "unique_pid", + "unique_ppid.*", + "unique_ppid", + "z.*", + "z", + "w.*", + "w", + "event.category.*", + "event.category", + "@timestamp.*", + "@timestamp" + ) + ); + } + + public void testOptionalDefaultNullValueFieldEqualNull() { + assertFieldNames( + """ + OPTIONAL where ?optional_field_default_null == null""", + Set.of( + "optional_field_default_null.*", + "optional_field_default_null", + "event.category.*", + "event.category", + "@timestamp.*", + "@timestamp" + ) + ); + } + + public void testSequenceOptionalFieldAsQueryKeys() { + assertFieldNames( + """ + sequence by ?x, transID + [ERROR where true] by ?x + [OPTIONAL where true] by ?y""", + Set.of("x.*", "x", "y.*", "y", "transID.*", "transID", "event.category.*", "event.category", "@timestamp.*", "@timestamp") + ); + } + + public void testSequenceAllKeysOptional() { + assertFieldNames( + """ + sequence by ?process.entity_id, ?process.pid + [process where transID == 2] + [file where transID == 0] with runs=2""", + Set.of( + "process.entity_id.*", + "process.entity_id", + "process.pid.*", + "process.pid", + "transID.*", + "transID", + "event.category.*", + "event.category", + "@timestamp.*", + "@timestamp" + ) + ); + } + + public void testMultipleMissing1() { + assertFieldNames(""" + sequence with maxspan=1s + [ test4 where tag == "A" ] + [ test4 where tag == "B" ] + ![ test4 where tag == "M1"] + [ test4 where tag == "C" ] + ![ test4 where tag == "M2"] + [ test4 where tag == "D" ]""", Set.of("tag.*", "tag", "event.category.*", "event.category", "@timestamp.*", "@timestamp")); + } + + public void testWithByKey_runs() { + assertFieldNames( + """ + sequence by k1 with maxspan=1s + [ test5 where tag == "normal" ] by k2 with runs=2 + ![ test5 where tag == "missing" ] by k2 + [ test5 where tag == "normal" ] by k2""", + Set.of("tag.*", "tag", "k1.*", "k1", "k2.*", "k2", "event.category.*", "event.category", "@timestamp.*", "@timestamp") + ); + } + + public void testComplexFiltersWithSample() { + assertFieldNames( + """ + sample by host + [any where uptime > 0 and host == "doom" and (uptime > 15 or bool == true)] by os + [any where port > 100 and ip == "10.0.0.5" or op_sys 
: "REDHAT"] by op_sys + [any where bool == true] by os""", + Set.of( + "host.*", + "host", + "uptime.*", + "uptime", + "bool.*", + "bool", + "os.*", + "os", + "port.*", + "port", + "ip.*", + "ip", + "op_sys.*", + "op_sys" + ) + ); + } + + public void testOptionalFieldAsKeyAndMultipleConditions() { + assertFieldNames( + """ + sample by ?x, ?y + [failure where (?x == null or ?y == null) and id == 17] + [success where (?y == null and ?x == null) and id == 18]""", + Set.of("x.*", "x", "y.*", "y", "id.*", "id", "event.category.*", "event.category") + ); + } + + private void assertFieldNames(String query, Set expected) { + Set fieldNames = EqlSession.fieldNames(parser.createStatement(query)); + assertThat(fieldNames, equalTo(expected)); + } +} diff --git a/x-pack/plugin/esql/build.gradle b/x-pack/plugin/esql/build.gradle index 0a64f050dcf43..a21c3d0990333 100644 --- a/x-pack/plugin/esql/build.gradle +++ b/x-pack/plugin/esql/build.gradle @@ -56,20 +56,40 @@ sourceSets.main.java { } tasks.getByName('test') { - dependsOn 'cleanGeneratedDocs' -} - -tasks.register('cleanGeneratedDocs', Delete) { - delete "${projectDir}/build/testrun/test/temp/esql/functions" -} - -tasks.register('copyGeneratedDocs', Sync) { - dependsOn 'test' - from "${projectDir}/build/testrun/test/temp/esql/functions" - into "${rootDir}/docs/reference/esql/functions" - include '**/*.asciidoc', '**/*.svg' - preserve { - include '/*.asciidoc' + if (BuildParams.isCi() == false) { + systemProperty 'generateDocs', true + doFirst { + project.delete( + files("${projectDir}/build/testrun/test/temp/esql/functions") + ) + } + doLast { + List signatures = file("${projectDir}/build/testrun/test/temp/esql/functions/signature").list().findAll {it.endsWith("svg")} + List types = file("${projectDir}/build/testrun/test/temp/esql/functions/types").list().findAll {it.endsWith("asciidoc")} + int count = signatures == null ? 0 : signatures.size() + if (count == 0) { + logger.quiet("ESQL Docs: No function signatures created. 
Skipping sync.") + } else if (count == 1) { + logger.quiet("ESQL Docs: Only updated $signatures and $types, patching them into place") + project.sync { + from "${projectDir}/build/testrun/test/temp/esql/functions" + into "${rootDir}/docs/reference/esql/functions" + include '**/*.asciidoc', '**/*.svg' + preserve { + include '/*.asciidoc', '**/*.asciidoc', '**/*.svg' + } + } + } else { + project.sync { + from "${projectDir}/build/testrun/test/temp/esql/functions" + into "${rootDir}/docs/reference/esql/functions" + include '**/*.asciidoc', '**/*.svg' + preserve { + include '/*.asciidoc' + } + } + } + } } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java index b0c2843554a69..dbd3580f90db8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayBlock.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + import java.util.Arrays; import java.util.BitSet; import java.util.stream.IntStream; @@ -17,6 +19,8 @@ */ public final class BooleanArrayBlock extends AbstractArrayBlock implements BooleanBlock { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BooleanArrayBlock.class); + private final boolean[] values; public BooleanArrayBlock(boolean[] values, int positionCount, int[] firstValueIndexes, BitSet nulls, MvOrdering mvOrdering) { @@ -57,6 +61,16 @@ public BooleanBlock expand() { return new BooleanArrayBlock(values, end, firstValues, shiftNullsToExpandedPositions(), MvOrdering.UNORDERED); } + public static long ramBytesEstimated(boolean[] values, int[] firstValueIndexes, BitSet nullsMask) { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + + BlockRamUsageEstimator.sizeOfBitSet(nullsMask) + RamUsageEstimator.shallowSizeOfInstance(MvOrdering.class); + } + + @Override + public long ramBytesUsed() { + return ramBytesEstimated(values, firstValueIndexes, nullsMask); + } + @Override public boolean equals(Object obj) { if (obj instanceof BooleanBlock that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java index 1b4374061b4e1..832b8f9f817bd 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + import java.util.Arrays; /** @@ -15,6 +17,8 @@ */ public final class BooleanArrayVector extends AbstractVector implements BooleanVector { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BooleanArrayVector.class); + private final boolean[] values; public BooleanArrayVector(boolean[] values, int positionCount) { @@ -47,6 +51,15 @@ public BooleanVector filter(int... 
diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java index 1b4374061b4e1..832b8f9f817bd 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanArrayVector.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + import java.util.Arrays; /** @@ -15,6 +17,8 @@ */ public final class BooleanArrayVector extends AbstractVector implements BooleanVector { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BooleanArrayVector.class); + private final boolean[] values; public BooleanArrayVector(boolean[] values, int positionCount) { @@ -47,6 +51,15 @@ public BooleanVector filter(int... positions) { return new FilterBooleanVector(this, positions); } + public static long ramBytesEstimated(boolean[] values) { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values); + } + + @Override + public long ramBytesUsed() { + return ramBytesEstimated(values); + } + @Override public boolean equals(Object obj) { if (obj instanceof BooleanVector that) {
diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java index d1f43310f00d1..25a34b383a4b4 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBigArrayVector.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.util.BitArray; import org.elasticsearch.core.Releasable; @@ -16,6 +17,8 @@ */ public final class BooleanBigArrayVector extends AbstractVector implements BooleanVector, Releasable { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BooleanBigArrayVector.class); + private final BitArray values; public BooleanBigArrayVector(BitArray values, int positionCount) { @@ -43,6 +46,11 @@ public boolean isConstant() { return false; } + @Override + public long ramBytesUsed() { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values); + } + @Override public BooleanVector filter(int... positions) { return new FilterBooleanVector(this, positions);
diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java index fbab7132ba525..03f82d7b952cb 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanBlock.java @@ -44,6 +44,10 @@ default String getWriteableName() { } static BooleanBlock of(StreamInput in) throws IOException { + final boolean isVector = in.readBoolean(); + if (isVector) { + return BooleanVector.of(in).asBlock(); + } final int positions = in.readVInt(); var builder = newBlockBuilder(positions); for (int i = 0; i < positions; i++) { @@ -63,17 +67,23 @@ static BooleanBlock of(StreamInput in) throws IOException { @Override default void writeTo(StreamOutput out) throws IOException { - final int positions = getPositionCount(); - out.writeVInt(positions); - for (int pos = 0; pos < positions; pos++) { - if (isNull(pos)) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - final int valueCount = getValueCount(pos); - out.writeVInt(valueCount); - for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { - out.writeBoolean(getBoolean(getFirstValueIndex(pos) + valueIndex)); + BooleanVector vector = asVector(); + out.writeBoolean(vector != null); + if (vector != null) { + vector.writeTo(out); + } else { + final int positions = getPositionCount(); + out.writeVInt(positions); + for (int pos = 0; pos < positions; pos++) { + if (isNull(pos)) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + final int valueCount = getValueCount(pos); + out.writeVInt(valueCount); + for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { + out.writeBoolean(getBoolean(getFirstValueIndex(pos) + valueIndex)); + } } } }
diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java index c020d41d22cab..1a5687050392e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVector.java @@ -7,6 +7,11 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + /** * Vector that stores boolean values. * This class is generated. Do not edit it. @@ -66,6 +71,35 @@ static int hash(BooleanVector vector) { return result; } + /** Deserializes a Vector from the given stream input. */ + static BooleanVector of(StreamInput in) throws IOException { + final int positions = in.readVInt(); + final boolean constant = in.readBoolean(); + if (constant && positions > 0) { + return new ConstantBooleanVector(in.readBoolean(), positions); + } else { + var builder = BooleanVector.newVectorBuilder(positions); + for (int i = 0; i < positions; i++) { + builder.appendBoolean(in.readBoolean()); + } + return builder.build(); + } + } + + /** Serializes this Vector to the given stream output. */ + default void writeTo(StreamOutput out) throws IOException { + final int positions = getPositionCount(); + out.writeVInt(positions); + out.writeBoolean(isConstant()); + if (isConstant() && positions > 0) { + out.writeBoolean(getBoolean(0)); + } else { + for (int i = 0; i < positions; i++) { + out.writeBoolean(getBoolean(i)); + } + } + } + static Builder newVectorBuilder(int estimatedSize) { return new BooleanVectorBuilder(estimatedSize); }
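Note: with BooleanBlock.writeTo now delegating to BooleanVector.writeTo whenever asVector() returns non-null, the wire format for a boolean block becomes, schematically (derived from the two methods above; the other element types below follow the same generated framing):

    // boolean isVector
    // if isVector:
    //     vInt positionCount, boolean isConstant
    //     then one value (if constant and positionCount > 0) or positionCount values
    // else:
    //     vInt positionCount, then per position:
    //         boolean isNull; if false: vInt valueCount, then valueCount values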
diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java index 23e069989f4f1..4049aec5d9746 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BooleanVectorBlock.java @@ -7,11 +7,7 @@ package org.elasticsearch.compute.data; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; +import org.apache.lucene.util.RamUsageEstimator; /** * Block view of a BooleanVector. @@ -19,6 +15,8 @@ */ public final class BooleanVectorBlock extends AbstractVectorBlock implements BooleanBlock { + private static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BooleanVectorBlock.class); + private final BooleanVector vector; BooleanVectorBlock(BooleanVector vector) { @@ -51,44 +49,9 @@ public BooleanBlock filter(int... positions) { return new FilterBooleanVector(vector, positions).asBlock(); } - public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - Block.class, - "BooleanVectorBlock", - BooleanVectorBlock::of - ); - - @Override - public String getWriteableName() { - return "BooleanVectorBlock"; - } - - static BooleanVectorBlock of(StreamInput in) throws IOException { - final int positions = in.readVInt(); - final boolean constant = in.readBoolean(); - if (constant && positions > 0) { - return new BooleanVectorBlock(new ConstantBooleanVector(in.readBoolean(), positions)); - } else { - var builder = BooleanVector.newVectorBuilder(positions); - for (int i = 0; i < positions; i++) { - builder.appendBoolean(in.readBoolean()); - } - return new BooleanVectorBlock(builder.build()); - } - } - @Override - public void writeTo(StreamOutput out) throws IOException { - final BooleanVector vector = this.vector; - final int positions = vector.getPositionCount(); - out.writeVInt(positions); - out.writeBoolean(vector.isConstant()); - if (vector.isConstant() && positions > 0) { - out.writeBoolean(getBoolean(0)); - } else { - for (int i = 0; i < positions; i++) { - out.writeBoolean(getBoolean(i)); - } - } + public long ramBytesUsed() { + return RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector); } @Override
diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java index 263378e5cf846..38fba2f742bf3 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayBlock.java @@ -8,6 +8,7 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.util.BytesRefArray; import java.util.BitSet; @@ -19,6 +20,8 @@ */ public final class BytesRefArrayBlock extends AbstractArrayBlock implements BytesRefBlock { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BytesRefArrayBlock.class); + private final BytesRefArray values; public BytesRefArrayBlock(BytesRefArray values, int positionCount, int[] firstValueIndexes, BitSet nulls, MvOrdering mvOrdering) { @@ -59,6 +62,16 @@ public BytesRefBlock expand() { return new BytesRefArrayBlock(values, end, firstValues, shiftNullsToExpandedPositions(), MvOrdering.UNORDERED); } + public static long ramBytesEstimated(BytesRefArray values, int[] firstValueIndexes, BitSet nullsMask) { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + + BlockRamUsageEstimator.sizeOfBitSet(nullsMask) + RamUsageEstimator.shallowSizeOfInstance(MvOrdering.class); + } + + @Override + public long ramBytesUsed() { + return ramBytesEstimated(values, firstValueIndexes, nullsMask); + } + @Override public boolean equals(Object obj) { if (obj instanceof BytesRefBlock that) {
diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java index 6f1970fe66c38..42c92aa3be136 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java +++
b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefArrayVector.java @@ -8,6 +8,7 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.util.BytesRefArray; /** @@ -16,6 +17,8 @@ */ public final class BytesRefArrayVector extends AbstractVector implements BytesRefVector { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BytesRefArrayVector.class); + private final BytesRefArray values; public BytesRefArrayVector(BytesRefArray values, int positionCount) { @@ -48,6 +51,15 @@ public BytesRefVector filter(int... positions) { return new FilterBytesRefVector(this, positions); } + public static long ramBytesEstimated(BytesRefArray values) { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values); + } + + @Override + public long ramBytesUsed() { + return ramBytesEstimated(values); + } + @Override public boolean equals(Object obj) { if (obj instanceof BytesRefVector that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java index 4ee11728b72d4..ef063ce7a80be 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefBlock.java @@ -46,6 +46,10 @@ default String getWriteableName() { } static BytesRefBlock of(StreamInput in) throws IOException { + final boolean isVector = in.readBoolean(); + if (isVector) { + return BytesRefVector.of(in).asBlock(); + } final int positions = in.readVInt(); var builder = newBlockBuilder(positions); for (int i = 0; i < positions; i++) { @@ -65,17 +69,23 @@ static BytesRefBlock of(StreamInput in) throws IOException { @Override default void writeTo(StreamOutput out) throws IOException { - final int positions = getPositionCount(); - out.writeVInt(positions); - for (int pos = 0; pos < positions; pos++) { - if (isNull(pos)) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - final int valueCount = getValueCount(pos); - out.writeVInt(valueCount); - for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { - out.writeBytesRef(getBytesRef(getFirstValueIndex(pos) + valueIndex, new BytesRef())); + BytesRefVector vector = asVector(); + out.writeBoolean(vector != null); + if (vector != null) { + vector.writeTo(out); + } else { + final int positions = getPositionCount(); + out.writeVInt(positions); + for (int pos = 0; pos < positions; pos++) { + if (isNull(pos)) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + final int valueCount = getValueCount(pos); + out.writeVInt(valueCount); + for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { + out.writeBytesRef(getBytesRef(getFirstValueIndex(pos) + valueIndex, new BytesRef())); + } } } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java index 27b02d5fc651b..6201ab4a3728a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVector.java @@ -8,6 +8,10 @@ package 
org.elasticsearch.compute.data; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; /** * Vector that stores BytesRef values. @@ -67,6 +71,35 @@ static int hash(BytesRefVector vector) { return result; } + /** Deserializes a Vector from the given stream input. */ + static BytesRefVector of(StreamInput in) throws IOException { + final int positions = in.readVInt(); + final boolean constant = in.readBoolean(); + if (constant && positions > 0) { + return new ConstantBytesRefVector(in.readBytesRef(), positions); + } else { + var builder = BytesRefVector.newVectorBuilder(positions); + for (int i = 0; i < positions; i++) { + builder.appendBytesRef(in.readBytesRef()); + } + return builder.build(); + } + } + + /** Serializes this Vector to the given stream output. */ + default void writeTo(StreamOutput out) throws IOException { + final int positions = getPositionCount(); + out.writeVInt(positions); + out.writeBoolean(isConstant()); + if (isConstant() && positions > 0) { + out.writeBytesRef(getBytesRef(0, new BytesRef())); + } else { + for (int i = 0; i < positions; i++) { + out.writeBytesRef(getBytesRef(i, new BytesRef())); + } + } + } + static Builder newVectorBuilder(int estimatedSize) { return new BytesRefVectorBuilder(estimatedSize); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java index d52ac7e66d04d..2b668ff34fe79 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/BytesRefVectorBlock.java @@ -8,11 +8,7 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.BytesRef; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; +import org.apache.lucene.util.RamUsageEstimator; /** * Block view of a BytesRefVector. @@ -20,6 +16,8 @@ */ public final class BytesRefVectorBlock extends AbstractVectorBlock implements BytesRefBlock { + private static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(BytesRefVectorBlock.class); + private final BytesRefVector vector; BytesRefVectorBlock(BytesRefVector vector) { @@ -52,44 +50,9 @@ public BytesRefBlock filter(int... 
positions) { return new FilterBytesRefVector(vector, positions).asBlock(); } - public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - Block.class, - "BytesRefVectorBlock", - BytesRefVectorBlock::of - ); - - @Override - public String getWriteableName() { - return "BytesRefVectorBlock"; - } - - static BytesRefVectorBlock of(StreamInput in) throws IOException { - final int positions = in.readVInt(); - final boolean constant = in.readBoolean(); - if (constant && positions > 0) { - return new BytesRefVectorBlock(new ConstantBytesRefVector(in.readBytesRef(), positions)); - } else { - var builder = BytesRefVector.newVectorBuilder(positions); - for (int i = 0; i < positions; i++) { - builder.appendBytesRef(in.readBytesRef()); - } - return new BytesRefVectorBlock(builder.build()); - } - } - @Override - public void writeTo(StreamOutput out) throws IOException { - final BytesRefVector vector = this.vector; - final int positions = vector.getPositionCount(); - out.writeVInt(positions); - out.writeBoolean(vector.isConstant()); - if (vector.isConstant() && positions > 0) { - out.writeBytesRef(getBytesRef(0, new BytesRef())); - } else { - for (int i = 0; i < positions; i++) { - out.writeBytesRef(getBytesRef(i, new BytesRef())); - } - } + public long ramBytesUsed() { + return RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBooleanVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBooleanVector.java index e802548350d39..3d6abc55d9469 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBooleanVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBooleanVector.java @@ -7,12 +7,16 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + /** * Vector implementation that stores a constant boolean value. * This class is generated. Do not edit it. */ public final class ConstantBooleanVector extends AbstractVector implements BooleanVector { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantBooleanVector.class); + private final boolean value; public ConstantBooleanVector(boolean value, int positionCount) { @@ -45,6 +49,11 @@ public boolean isConstant() { return true; } + @Override + public long ramBytesUsed() { + return BASE_RAM_BYTES_USED + RamUsageEstimator.shallowSizeOfInstance(boolean.class); + } + @Override public boolean equals(Object obj) { if (obj instanceof BooleanVector that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java index 25f07d72c1d65..896ac52bf0bc0 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantBytesRefVector.java @@ -8,6 +8,7 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.RamUsageEstimator; /** * Vector implementation that stores a constant BytesRef value. 
@@ -15,6 +16,8 @@ */ public final class ConstantBytesRefVector extends AbstractVector implements BytesRefVector { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantBytesRefVector.class); + private final BytesRef value; public ConstantBytesRefVector(BytesRef value, int positionCount) { @@ -47,6 +50,11 @@ public boolean isConstant() { return true; } + @Override + public long ramBytesUsed() { + return BASE_RAM_BYTES_USED + RamUsageEstimator.shallowSizeOfInstance(BytesRef.class); + } + @Override public boolean equals(Object obj) { if (obj instanceof BytesRefVector that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantDoubleVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantDoubleVector.java index 8d196aa33f974..6099864b5b45b 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantDoubleVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantDoubleVector.java @@ -7,12 +7,16 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + /** * Vector implementation that stores a constant double value. * This class is generated. Do not edit it. */ public final class ConstantDoubleVector extends AbstractVector implements DoubleVector { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantDoubleVector.class); + private final double value; public ConstantDoubleVector(double value, int positionCount) { @@ -45,6 +49,11 @@ public boolean isConstant() { return true; } + @Override + public long ramBytesUsed() { + return BASE_RAM_BYTES_USED + RamUsageEstimator.shallowSizeOfInstance(double.class); + } + @Override public boolean equals(Object obj) { if (obj instanceof DoubleVector that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantIntVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantIntVector.java index ad942bb79e779..ab4e063c2ed78 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantIntVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantIntVector.java @@ -7,12 +7,16 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + /** * Vector implementation that stores a constant int value. * This class is generated. Do not edit it. 
*/ public final class ConstantIntVector extends AbstractVector implements IntVector { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantIntVector.class); + private final int value; public ConstantIntVector(int value, int positionCount) { @@ -45,6 +49,11 @@ public boolean isConstant() { return true; } + @Override + public long ramBytesUsed() { + return BASE_RAM_BYTES_USED + RamUsageEstimator.shallowSizeOfInstance(int.class); + } + @Override public boolean equals(Object obj) { if (obj instanceof IntVector that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantLongVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantLongVector.java index 79d9ba76db48c..c47c48182f1d1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantLongVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/ConstantLongVector.java @@ -7,12 +7,16 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + /** * Vector implementation that stores a constant long value. * This class is generated. Do not edit it. */ public final class ConstantLongVector extends AbstractVector implements LongVector { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantLongVector.class); + private final long value; public ConstantLongVector(long value, int positionCount) { @@ -45,6 +49,11 @@ public boolean isConstant() { return true; } + @Override + public long ramBytesUsed() { + return BASE_RAM_BYTES_USED + RamUsageEstimator.shallowSizeOfInstance(long.class); + } + @Override public boolean equals(Object obj) { if (obj instanceof LongVector that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java index c74de042da52b..0fd7aa987c315 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayBlock.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + import java.util.Arrays; import java.util.BitSet; import java.util.stream.IntStream; @@ -17,6 +19,8 @@ */ public final class DoubleArrayBlock extends AbstractArrayBlock implements DoubleBlock { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DoubleArrayBlock.class); + private final double[] values; public DoubleArrayBlock(double[] values, int positionCount, int[] firstValueIndexes, BitSet nulls, MvOrdering mvOrdering) { @@ -57,6 +61,16 @@ public DoubleBlock expand() { return new DoubleArrayBlock(values, end, firstValues, shiftNullsToExpandedPositions(), MvOrdering.UNORDERED); } + public static long ramBytesEstimated(double[] values, int[] firstValueIndexes, BitSet nullsMask) { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + + BlockRamUsageEstimator.sizeOfBitSet(nullsMask) + RamUsageEstimator.shallowSizeOfInstance(MvOrdering.class); + } + + @Override + public long ramBytesUsed() { + return ramBytesEstimated(values, firstValueIndexes, nullsMask); + } + @Override public boolean equals(Object obj) { 
if (obj instanceof DoubleBlock that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java index 340d434907643..2d2052371ed78 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleArrayVector.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + import java.util.Arrays; /** @@ -15,6 +17,8 @@ */ public final class DoubleArrayVector extends AbstractVector implements DoubleVector { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DoubleArrayVector.class); + private final double[] values; public DoubleArrayVector(double[] values, int positionCount) { @@ -47,6 +51,15 @@ public DoubleVector filter(int... positions) { return new FilterDoubleVector(this, positions); } + public static long ramBytesEstimated(double[] values) { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values); + } + + @Override + public long ramBytesUsed() { + return ramBytesEstimated(values); + } + @Override public boolean equals(Object obj) { if (obj instanceof DoubleVector that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java index 138fecbf0725b..129d4b3c31d93 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBigArrayVector.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.util.DoubleArray; import org.elasticsearch.core.Releasable; @@ -16,6 +17,8 @@ */ public final class DoubleBigArrayVector extends AbstractVector implements DoubleVector, Releasable { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DoubleBigArrayVector.class); + private final DoubleArray values; public DoubleBigArrayVector(DoubleArray values, int positionCount) { @@ -43,6 +46,11 @@ public boolean isConstant() { return false; } + @Override + public long ramBytesUsed() { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values); + } + @Override public DoubleVector filter(int... 
positions) { return new FilterDoubleVector(this, positions); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java index 3f5fe1c180686..cf749f20de9b2 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleBlock.java @@ -44,6 +44,10 @@ default String getWriteableName() { } static DoubleBlock of(StreamInput in) throws IOException { + final boolean isVector = in.readBoolean(); + if (isVector) { + return DoubleVector.of(in).asBlock(); + } final int positions = in.readVInt(); var builder = newBlockBuilder(positions); for (int i = 0; i < positions; i++) { @@ -63,17 +67,23 @@ static DoubleBlock of(StreamInput in) throws IOException { @Override default void writeTo(StreamOutput out) throws IOException { - final int positions = getPositionCount(); - out.writeVInt(positions); - for (int pos = 0; pos < positions; pos++) { - if (isNull(pos)) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - final int valueCount = getValueCount(pos); - out.writeVInt(valueCount); - for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { - out.writeDouble(getDouble(getFirstValueIndex(pos) + valueIndex)); + DoubleVector vector = asVector(); + out.writeBoolean(vector != null); + if (vector != null) { + vector.writeTo(out); + } else { + final int positions = getPositionCount(); + out.writeVInt(positions); + for (int pos = 0; pos < positions; pos++) { + if (isNull(pos)) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + final int valueCount = getValueCount(pos); + out.writeVInt(valueCount); + for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { + out.writeDouble(getDouble(getFirstValueIndex(pos) + valueIndex)); + } } } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java index d6886bef60a05..09bdcafffbfe5 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVector.java @@ -7,6 +7,11 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + /** * Vector that stores double values. * This class is generated. Do not edit it. @@ -67,6 +72,35 @@ static int hash(DoubleVector vector) { return result; } + /** Deserializes a Vector from the given stream input. */ + static DoubleVector of(StreamInput in) throws IOException { + final int positions = in.readVInt(); + final boolean constant = in.readBoolean(); + if (constant && positions > 0) { + return new ConstantDoubleVector(in.readDouble(), positions); + } else { + var builder = DoubleVector.newVectorBuilder(positions); + for (int i = 0; i < positions; i++) { + builder.appendDouble(in.readDouble()); + } + return builder.build(); + } + } + + /** Serializes this Vector to the given stream output. 
*/ + default void writeTo(StreamOutput out) throws IOException { + final int positions = getPositionCount(); + out.writeVInt(positions); + out.writeBoolean(isConstant()); + if (isConstant() && positions > 0) { + out.writeDouble(getDouble(0)); + } else { + for (int i = 0; i < positions; i++) { + out.writeDouble(getDouble(i)); + } + } + } + static Builder newVectorBuilder(int estimatedSize) { return new DoubleVectorBuilder(estimatedSize); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java index adc4d9d2eee01..168cdc45167f6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/DoubleVectorBlock.java @@ -7,11 +7,7 @@ package org.elasticsearch.compute.data; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; +import org.apache.lucene.util.RamUsageEstimator; /** * Block view of a DoubleVector. @@ -19,6 +15,8 @@ */ public final class DoubleVectorBlock extends AbstractVectorBlock implements DoubleBlock { + private static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DoubleVectorBlock.class); + private final DoubleVector vector; DoubleVectorBlock(DoubleVector vector) { @@ -51,44 +49,9 @@ public DoubleBlock filter(int... positions) { return new FilterDoubleVector(vector, positions).asBlock(); } - public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - Block.class, - "DoubleVectorBlock", - DoubleVectorBlock::of - ); - - @Override - public String getWriteableName() { - return "DoubleVectorBlock"; - } - - static DoubleVectorBlock of(StreamInput in) throws IOException { - final int positions = in.readVInt(); - final boolean constant = in.readBoolean(); - if (constant && positions > 0) { - return new DoubleVectorBlock(new ConstantDoubleVector(in.readDouble(), positions)); - } else { - var builder = DoubleVector.newVectorBuilder(positions); - for (int i = 0; i < positions; i++) { - builder.appendDouble(in.readDouble()); - } - return new DoubleVectorBlock(builder.build()); - } - } - @Override - public void writeTo(StreamOutput out) throws IOException { - final DoubleVector vector = this.vector; - final int positions = vector.getPositionCount(); - out.writeVInt(positions); - out.writeBoolean(vector.isConstant()); - if (vector.isConstant() && positions > 0) { - out.writeDouble(getDouble(0)); - } else { - for (int i = 0; i < positions; i++) { - out.writeDouble(getDouble(i)); - } - } + public long ramBytesUsed() { + return RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBooleanBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBooleanBlock.java index a3e3793498463..ed499489b3bb6 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBooleanBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBooleanBlock.java @@ -7,12 +7,16 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + /** * 
Filter block for BooleanBlocks. * This class is generated. Do not edit it. */ final class FilterBooleanBlock extends AbstractFilterBlock implements BooleanBlock { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterBooleanBlock.class); + private final BooleanBlock block; FilterBooleanBlock(BooleanBlock block, int... positions) { @@ -65,6 +69,13 @@ public BooleanBlock expand() { return builder.build(); } + @Override + public long ramBytesUsed() { + // from a usage and resource point of view filter blocks encapsulate + // their inner block, rather than listing it as a child resource + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(block) + RamUsageEstimator.sizeOf(positions); + } + @Override public boolean equals(Object obj) { if (obj instanceof BooleanBlock that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBooleanVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBooleanVector.java index 5f6ad76e35a09..c519bc55dabd8 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBooleanVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBooleanVector.java @@ -7,12 +7,16 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + /** * Filter vector for BooleanVectors. * This class is generated. Do not edit it. */ public final class FilterBooleanVector extends AbstractFilterVector implements BooleanVector { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterBooleanVector.class); + private final BooleanVector vector; FilterBooleanVector(BooleanVector vector, int... positions) { @@ -45,6 +49,13 @@ public BooleanVector filter(int... positions) { return new FilterBooleanVector(this, positions); } + @Override + public long ramBytesUsed() { + // from a usage and resource point of view filter vectors encapsulate + // their inner vector, rather than listing it as a child resource + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector) + RamUsageEstimator.sizeOf(positions); + } + @Override public boolean equals(Object obj) { if (obj instanceof BooleanVector that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBytesRefBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBytesRefBlock.java index 3bdd60dbedb2c..ad2266441fad7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBytesRefBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBytesRefBlock.java @@ -8,6 +8,7 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.RamUsageEstimator; /** * Filter block for BytesRefBlocks. @@ -15,6 +16,8 @@ */ final class FilterBytesRefBlock extends AbstractFilterBlock implements BytesRefBlock { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterBytesRefBlock.class); + private final BytesRefBlock block; FilterBytesRefBlock(BytesRefBlock block, int... 
positions) { @@ -69,6 +72,13 @@ public BytesRefBlock expand() { return builder.build(); } + @Override + public long ramBytesUsed() { + // from a usage and resource point of view filter blocks encapsulate + // their inner block, rather than listing it as a child resource + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(block) + RamUsageEstimator.sizeOf(positions); + } + @Override public boolean equals(Object obj) { if (obj instanceof BytesRefBlock that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBytesRefVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBytesRefVector.java index 63ef354fd6d36..3395621af9ccc 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBytesRefVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterBytesRefVector.java @@ -8,6 +8,7 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.RamUsageEstimator; /** * Filter vector for BytesRefVectors. @@ -15,6 +16,8 @@ */ public final class FilterBytesRefVector extends AbstractFilterVector implements BytesRefVector { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterBytesRefVector.class); + private final BytesRefVector vector; FilterBytesRefVector(BytesRefVector vector, int... positions) { @@ -47,6 +50,13 @@ public BytesRefVector filter(int... positions) { return new FilterBytesRefVector(this, positions); } + @Override + public long ramBytesUsed() { + // from a usage and resource point of view filter vectors encapsulate + // their inner vector, rather than listing it as a child resource + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector) + RamUsageEstimator.sizeOf(positions); + } + @Override public boolean equals(Object obj) { if (obj instanceof BytesRefVector that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterDoubleBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterDoubleBlock.java index 2f8f24b6b134f..51136e2c8def7 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterDoubleBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterDoubleBlock.java @@ -7,12 +7,16 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + /** * Filter block for DoubleBlocks. * This class is generated. Do not edit it. */ final class FilterDoubleBlock extends AbstractFilterBlock implements DoubleBlock { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterDoubleBlock.class); + private final DoubleBlock block; FilterDoubleBlock(DoubleBlock block, int... 
positions) { @@ -65,6 +69,13 @@ public DoubleBlock expand() { return builder.build(); } + @Override + public long ramBytesUsed() { + // from a usage and resource point of view filter blocks encapsulate + // their inner block, rather than listing it as a child resource + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(block) + RamUsageEstimator.sizeOf(positions); + } + @Override public boolean equals(Object obj) { if (obj instanceof DoubleBlock that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterDoubleVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterDoubleVector.java index 6e841ec13b4e5..08ce7cefcd48a 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterDoubleVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterDoubleVector.java @@ -7,12 +7,16 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + /** * Filter vector for DoubleVectors. * This class is generated. Do not edit it. */ public final class FilterDoubleVector extends AbstractFilterVector implements DoubleVector { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterDoubleVector.class); + private final DoubleVector vector; FilterDoubleVector(DoubleVector vector, int... positions) { @@ -45,6 +49,13 @@ public DoubleVector filter(int... positions) { return new FilterDoubleVector(this, positions); } + @Override + public long ramBytesUsed() { + // from a usage and resource point of view filter vectors encapsulate + // their inner vector, rather than listing it as a child resource + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector) + RamUsageEstimator.sizeOf(positions); + } + @Override public boolean equals(Object obj) { if (obj instanceof DoubleVector that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterIntBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterIntBlock.java index 21c3bb3ebdfbd..b915e40ab2d05 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterIntBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterIntBlock.java @@ -7,12 +7,16 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + /** * Filter block for IntBlocks. * This class is generated. Do not edit it. */ final class FilterIntBlock extends AbstractFilterBlock implements IntBlock { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterIntBlock.class); + private final IntBlock block; FilterIntBlock(IntBlock block, int... 
positions) { @@ -65,6 +69,13 @@ public IntBlock expand() { return builder.build(); } + @Override + public long ramBytesUsed() { + // from a usage and resource point of view filter blocks encapsulate + // their inner block, rather than listing it as a child resource + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(block) + RamUsageEstimator.sizeOf(positions); + } + @Override public boolean equals(Object obj) { if (obj instanceof IntBlock that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterIntVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterIntVector.java index 7caf0ee9ee45b..c4954318f0a99 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterIntVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterIntVector.java @@ -7,12 +7,16 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + /** * Filter vector for IntVectors. * This class is generated. Do not edit it. */ public final class FilterIntVector extends AbstractFilterVector implements IntVector { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterIntVector.class); + private final IntVector vector; FilterIntVector(IntVector vector, int... positions) { @@ -45,6 +49,13 @@ public IntVector filter(int... positions) { return new FilterIntVector(this, positions); } + @Override + public long ramBytesUsed() { + // from a usage and resource point of view filter vectors encapsulate + // their inner vector, rather than listing it as a child resource + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector) + RamUsageEstimator.sizeOf(positions); + } + @Override public boolean equals(Object obj) { if (obj instanceof IntVector that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterLongBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterLongBlock.java index d67d3e388b6ca..7461e5cbb0dc1 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterLongBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterLongBlock.java @@ -7,12 +7,16 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + /** * Filter block for LongBlocks. * This class is generated. Do not edit it. */ final class FilterLongBlock extends AbstractFilterBlock implements LongBlock { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterLongBlock.class); + private final LongBlock block; FilterLongBlock(LongBlock block, int... 
positions) { @@ -65,6 +69,13 @@ public LongBlock expand() { return builder.build(); } + @Override + public long ramBytesUsed() { + // from a usage and resource point of view filter blocks encapsulate + // their inner block, rather than listing it as a child resource + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(block) + RamUsageEstimator.sizeOf(positions); + } + @Override public boolean equals(Object obj) { if (obj instanceof LongBlock that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterLongVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterLongVector.java index 96d72f7959474..12dce9350e080 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterLongVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/FilterLongVector.java @@ -7,12 +7,16 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + /** * Filter vector for LongVectors. * This class is generated. Do not edit it. */ public final class FilterLongVector extends AbstractFilterVector implements LongVector { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(FilterLongVector.class); + private final LongVector vector; FilterLongVector(LongVector vector, int... positions) { @@ -45,6 +49,13 @@ public LongVector filter(int... positions) { return new FilterLongVector(this, positions); } + @Override + public long ramBytesUsed() { + // from a usage and resource point of view filter vectors encapsulate + // their inner vector, rather than listing it as a child resource + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector) + RamUsageEstimator.sizeOf(positions); + } + @Override public boolean equals(Object obj) { if (obj instanceof LongVector that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java index 2a52516148ab1..bc7f4fefdefb5 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayBlock.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + import java.util.Arrays; import java.util.BitSet; import java.util.stream.IntStream; @@ -17,6 +19,8 @@ */ public final class IntArrayBlock extends AbstractArrayBlock implements IntBlock { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IntArrayBlock.class); + private final int[] values; public IntArrayBlock(int[] values, int positionCount, int[] firstValueIndexes, BitSet nulls, MvOrdering mvOrdering) { @@ -57,6 +61,16 @@ public IntBlock expand() { return new IntArrayBlock(values, end, firstValues, shiftNullsToExpandedPositions(), MvOrdering.UNORDERED); } + public static long ramBytesEstimated(int[] values, int[] firstValueIndexes, BitSet nullsMask) { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + + BlockRamUsageEstimator.sizeOfBitSet(nullsMask) + RamUsageEstimator.shallowSizeOfInstance(MvOrdering.class); + } + + @Override + public long ramBytesUsed() { + return ramBytesEstimated(values, firstValueIndexes, nullsMask); + } + @Override 
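A note on the accounting pattern repeated across these Filter* classes: a filter prices itself as if it owned the wrapped block or vector outright, plus the int[] positions map that defines the view, so two filters over the same data will report overlapping memory. A minimal standalone sketch of the same arithmetic using plain Lucene RamUsageEstimator calls (the class and field names below are illustrative, not part of this change):

import org.apache.lucene.util.RamUsageEstimator;

// Hypothetical mirror of the Filter* accounting shown in this diff.
final class FilteredLongs {
    private static final long BASE = RamUsageEstimator.shallowSizeOfInstance(FilteredLongs.class);

    private final long[] values;   // stands in for the wrapped vector or block
    private final int[] positions; // the filtered view's position map

    FilteredLongs(long[] values, int[] positions) {
        this.values = values;
        this.positions = positions;
    }

    long ramBytesUsed() {
        // sizeOf(long[]) and sizeOf(int[]) include array headers and alignment,
        // and the wrapped data is attributed wholly to this wrapper.
        return BASE + RamUsageEstimator.sizeOf(values) + RamUsageEstimator.sizeOf(positions);
    }
}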
public boolean equals(Object obj) { if (obj instanceof IntBlock that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java index c3a55e9e63075..a614f21c70b1d 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntArrayVector.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + import java.util.Arrays; /** @@ -15,6 +17,8 @@ */ public final class IntArrayVector extends AbstractVector implements IntVector { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IntArrayVector.class); + private final int[] values; public IntArrayVector(int[] values, int positionCount) { @@ -47,6 +51,15 @@ public IntVector filter(int... positions) { return new FilterIntVector(this, positions); } + public static long ramBytesEstimated(int[] values) { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values); + } + + @Override + public long ramBytesUsed() { + return ramBytesEstimated(values); + } + @Override public boolean equals(Object obj) { if (obj instanceof IntVector that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java index a172ea8b9cdc7..2058006eb45bb 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBigArrayVector.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.util.IntArray; import org.elasticsearch.core.Releasable; @@ -16,6 +17,8 @@ */ public final class IntBigArrayVector extends AbstractVector implements IntVector, Releasable { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IntBigArrayVector.class); + private final IntArray values; public IntBigArrayVector(IntArray values, int positionCount) { @@ -43,6 +46,11 @@ public boolean isConstant() { return false; } + @Override + public long ramBytesUsed() { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values); + } + @Override public IntVector filter(int... 
positions) { return new FilterIntVector(this, positions); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java index 0653824b5b8f4..0fbcbe6c56362 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntBlock.java @@ -44,6 +44,10 @@ default String getWriteableName() { } static IntBlock of(StreamInput in) throws IOException { + final boolean isVector = in.readBoolean(); + if (isVector) { + return IntVector.of(in).asBlock(); + } final int positions = in.readVInt(); var builder = newBlockBuilder(positions); for (int i = 0; i < positions; i++) { @@ -63,17 +67,23 @@ static IntBlock of(StreamInput in) throws IOException { @Override default void writeTo(StreamOutput out) throws IOException { - final int positions = getPositionCount(); - out.writeVInt(positions); - for (int pos = 0; pos < positions; pos++) { - if (isNull(pos)) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - final int valueCount = getValueCount(pos); - out.writeVInt(valueCount); - for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { - out.writeInt(getInt(getFirstValueIndex(pos) + valueIndex)); + IntVector vector = asVector(); + out.writeBoolean(vector != null); + if (vector != null) { + vector.writeTo(out); + } else { + final int positions = getPositionCount(); + out.writeVInt(positions); + for (int pos = 0; pos < positions; pos++) { + if (isNull(pos)) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + final int valueCount = getValueCount(pos); + out.writeVInt(valueCount); + for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { + out.writeInt(getInt(getFirstValueIndex(pos) + valueIndex)); + } } } } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java index 2b9a1b8b8ccd7..35bab4278d2fd 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVector.java @@ -7,6 +7,11 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + /** * Vector that stores int values. * This class is generated. Do not edit it. @@ -66,6 +71,35 @@ static int hash(IntVector vector) { return result; } + /** Deserializes a Vector from the given stream input. */ + static IntVector of(StreamInput in) throws IOException { + final int positions = in.readVInt(); + final boolean constant = in.readBoolean(); + if (constant && positions > 0) { + return new ConstantIntVector(in.readInt(), positions); + } else { + var builder = IntVector.newVectorBuilder(positions); + for (int i = 0; i < positions; i++) { + builder.appendInt(in.readInt()); + } + return builder.build(); + } + } + + /** Serializes this Vector to the given stream output. 
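Taken together, the of and writeTo methods here define a small layered wire format: every block leads with one boolean saying whether the payload is a dense vector, and a vector payload is a vInt position count, a constant flag, and then either a single value or one value per position. A hedged round-trip sketch for the int case (BytesStreamOutput is the usual in-memory StreamOutput in Elasticsearch; the rest is API visible in this diff):

import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.compute.data.IntBlock;
import org.elasticsearch.compute.data.IntVector;

import java.io.IOException;

class IntBlockWireDemo {
    static void roundTrip() throws IOException {
        var builder = IntVector.newVectorBuilder(3);
        builder.appendInt(1);
        builder.appendInt(2);
        builder.appendInt(3);
        IntBlock block = builder.build().asBlock();

        BytesStreamOutput out = new BytesStreamOutput();
        block.writeTo(out); // wire: [true: vector][vInt 3][false: not constant][1][2][3]

        StreamInput in = out.bytes().streamInput();
        IntBlock copy = IntBlock.of(in); // the leading boolean routes to IntVector.of
        assert copy.asVector() != null;  // the dense view survives the round trip
    }
}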
*/ + default void writeTo(StreamOutput out) throws IOException { + final int positions = getPositionCount(); + out.writeVInt(positions); + out.writeBoolean(isConstant()); + if (isConstant() && positions > 0) { + out.writeInt(getInt(0)); + } else { + for (int i = 0; i < positions; i++) { + out.writeInt(getInt(i)); + } + } + } + static Builder newVectorBuilder(int estimatedSize) { return new IntVectorBuilder(estimatedSize); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java index 4856c81966271..0d6d2e21bf36e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/IntVectorBlock.java @@ -7,11 +7,7 @@ package org.elasticsearch.compute.data; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; +import org.apache.lucene.util.RamUsageEstimator; /** * Block view of a IntVector. @@ -19,6 +15,8 @@ */ public final class IntVectorBlock extends AbstractVectorBlock implements IntBlock { + private static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(IntVectorBlock.class); + private final IntVector vector; IntVectorBlock(IntVector vector) { @@ -51,44 +49,9 @@ public IntBlock filter(int... positions) { return new FilterIntVector(vector, positions).asBlock(); } - public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - Block.class, - "IntVectorBlock", - IntVectorBlock::of - ); - - @Override - public String getWriteableName() { - return "IntVectorBlock"; - } - - static IntVectorBlock of(StreamInput in) throws IOException { - final int positions = in.readVInt(); - final boolean constant = in.readBoolean(); - if (constant && positions > 0) { - return new IntVectorBlock(new ConstantIntVector(in.readInt(), positions)); - } else { - var builder = IntVector.newVectorBuilder(positions); - for (int i = 0; i < positions; i++) { - builder.appendInt(in.readInt()); - } - return new IntVectorBlock(builder.build()); - } - } - @Override - public void writeTo(StreamOutput out) throws IOException { - final IntVector vector = this.vector; - final int positions = vector.getPositionCount(); - out.writeVInt(positions); - out.writeBoolean(vector.isConstant()); - if (vector.isConstant() && positions > 0) { - out.writeInt(getInt(0)); - } else { - for (int i = 0; i < positions; i++) { - out.writeInt(getInt(i)); - } - } + public long ramBytesUsed() { + return RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java index ec81eb4d59563..f973539e08009 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayBlock.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + import java.util.Arrays; import java.util.BitSet; import java.util.stream.IntStream; @@ -17,6 +19,8 @@ */ public final class 
LongArrayBlock extends AbstractArrayBlock implements LongBlock { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(LongArrayBlock.class); + private final long[] values; public LongArrayBlock(long[] values, int positionCount, int[] firstValueIndexes, BitSet nulls, MvOrdering mvOrdering) { @@ -57,6 +61,16 @@ public LongBlock expand() { return new LongArrayBlock(values, end, firstValues, shiftNullsToExpandedPositions(), MvOrdering.UNORDERED); } + public static long ramBytesEstimated(long[] values, int[] firstValueIndexes, BitSet nullsMask) { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + + BlockRamUsageEstimator.sizeOfBitSet(nullsMask) + RamUsageEstimator.shallowSizeOfInstance(MvOrdering.class); + } + + @Override + public long ramBytesUsed() { + return ramBytesEstimated(values, firstValueIndexes, nullsMask); + } + @Override public boolean equals(Object obj) { if (obj instanceof LongBlock that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java index 997412473af1b..dafe44b22415e 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongArrayVector.java @@ -7,6 +7,8 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; + import java.util.Arrays; /** @@ -15,6 +17,8 @@ */ public final class LongArrayVector extends AbstractVector implements LongVector { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(LongArrayVector.class); + private final long[] values; public LongArrayVector(long[] values, int positionCount) { @@ -47,6 +51,15 @@ public LongVector filter(int... 
positions) { return new FilterLongVector(this, positions); } + public static long ramBytesEstimated(long[] values) { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values); + } + + @Override + public long ramBytesUsed() { + return ramBytesEstimated(values); + } + @Override public boolean equals(Object obj) { if (obj instanceof LongVector that) { diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java index 30c69a5792cb7..6db8675a8d69f 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBigArrayVector.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.util.LongArray; import org.elasticsearch.core.Releasable; @@ -16,6 +17,8 @@ */ public final class LongBigArrayVector extends AbstractVector implements LongVector, Releasable { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(LongBigArrayVector.class); + private final LongArray values; public LongBigArrayVector(LongArray values, int positionCount) { @@ -43,6 +46,11 @@ public boolean isConstant() { return false; } + @Override + public long ramBytesUsed() { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values); + } + @Override public LongVector filter(int... positions) { return new FilterLongVector(this, positions); diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java index df80bc8e29aa3..965c59e3b7f73 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongBlock.java @@ -44,6 +44,10 @@ default String getWriteableName() { } static LongBlock of(StreamInput in) throws IOException { + final boolean isVector = in.readBoolean(); + if (isVector) { + return LongVector.of(in).asBlock(); + } final int positions = in.readVInt(); var builder = newBlockBuilder(positions); for (int i = 0; i < positions; i++) { @@ -63,17 +67,23 @@ static LongBlock of(StreamInput in) throws IOException { @Override default void writeTo(StreamOutput out) throws IOException { - final int positions = getPositionCount(); - out.writeVInt(positions); - for (int pos = 0; pos < positions; pos++) { - if (isNull(pos)) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - final int valueCount = getValueCount(pos); - out.writeVInt(valueCount); - for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { - out.writeLong(getLong(getFirstValueIndex(pos) + valueIndex)); + LongVector vector = asVector(); + out.writeBoolean(vector != null); + if (vector != null) { + vector.writeTo(out); + } else { + final int positions = getPositionCount(); + out.writeVInt(positions); + for (int pos = 0; pos < positions; pos++) { + if (isNull(pos)) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + final int valueCount = getValueCount(pos); + out.writeVInt(valueCount); + for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { + out.writeLong(getLong(getFirstValueIndex(pos) + valueIndex)); + } } } } diff 
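One subtlety in the BigArray-backed vectors above: values is an IntArray or LongArray rather than a primitive array, so RamUsageEstimator.sizeOf(values) resolves to Lucene's sizeOf(Accountable) overload and simply defers to the big array's own ramBytesUsed(). A minimal sketch of that dispatch (BigArrays.NON_RECYCLING_INSTANCE is the standard non-pooled factory; the exact byte count is JVM-dependent):

import org.apache.lucene.util.RamUsageEstimator;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.LongArray;

class BigArrayAccountingDemo {
    static long estimate() {
        LongArray values = BigArrays.NON_RECYCLING_INSTANCE.newLongArray(1024);
        // LongArray implements Accountable, so this call is values.ramBytesUsed(),
        // not a reflective walk of the object graph.
        long bytes = RamUsageEstimator.sizeOf(values);
        values.close();
        return bytes;
    }
}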
--git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java index 0e0b02987dd26..c931613f9e40c 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVector.java @@ -7,6 +7,11 @@ package org.elasticsearch.compute.data; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; + /** * Vector that stores long values. * This class is generated. Do not edit it. @@ -67,6 +72,35 @@ static int hash(LongVector vector) { return result; } + /** Deserializes a Vector from the given stream input. */ + static LongVector of(StreamInput in) throws IOException { + final int positions = in.readVInt(); + final boolean constant = in.readBoolean(); + if (constant && positions > 0) { + return new ConstantLongVector(in.readLong(), positions); + } else { + var builder = LongVector.newVectorBuilder(positions); + for (int i = 0; i < positions; i++) { + builder.appendLong(in.readLong()); + } + return builder.build(); + } + } + + /** Serializes this Vector to the given stream output. */ + default void writeTo(StreamOutput out) throws IOException { + final int positions = getPositionCount(); + out.writeVInt(positions); + out.writeBoolean(isConstant()); + if (isConstant() && positions > 0) { + out.writeLong(getLong(0)); + } else { + for (int i = 0; i < positions; i++) { + out.writeLong(getLong(i)); + } + } + } + static Builder newVectorBuilder(int estimatedSize) { return new LongVectorBuilder(estimatedSize); } diff --git a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java index 9f7c026e8687c..330c10d6927fc 100644 --- a/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java +++ b/x-pack/plugin/esql/compute/src/main/generated-src/org/elasticsearch/compute/data/LongVectorBlock.java @@ -7,11 +7,7 @@ package org.elasticsearch.compute.data; -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; +import org.apache.lucene.util.RamUsageEstimator; /** * Block view of a LongVector. @@ -19,6 +15,8 @@ */ public final class LongVectorBlock extends AbstractVectorBlock implements LongBlock { + private static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(LongVectorBlock.class); + private final LongVector vector; LongVectorBlock(LongVector vector) { @@ -51,44 +49,9 @@ public LongBlock filter(int... 
positions) { return new FilterLongVector(vector, positions).asBlock(); } - public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - Block.class, - "LongVectorBlock", - LongVectorBlock::of - ); - - @Override - public String getWriteableName() { - return "LongVectorBlock"; - } - - static LongVectorBlock of(StreamInput in) throws IOException { - final int positions = in.readVInt(); - final boolean constant = in.readBoolean(); - if (constant && positions > 0) { - return new LongVectorBlock(new ConstantLongVector(in.readLong(), positions)); - } else { - var builder = LongVector.newVectorBuilder(positions); - for (int i = 0; i < positions; i++) { - builder.appendLong(in.readLong()); - } - return new LongVectorBlock(builder.build()); - } - } - @Override - public void writeTo(StreamOutput out) throws IOException { - final LongVector vector = this.vector; - final int positions = vector.getPositionCount(); - out.writeVInt(positions); - out.writeBoolean(vector.isConstant()); - if (vector.isConstant() && positions > 0) { - out.writeLong(getLong(0)); - } else { - for (int i = 0; i < positions; i++) { - out.writeLong(getLong(i)); - } - } + public long ramBytesUsed() { + return RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/module-info.java b/x-pack/plugin/esql/compute/src/main/java/module-info.java index 91e45baa7bda9..280f2467a566c 100644 --- a/x-pack/plugin/esql/compute/src/main/java/module-info.java +++ b/x-pack/plugin/esql/compute/src/main/java/module-info.java @@ -11,7 +11,9 @@ requires org.elasticsearch.server; requires org.elasticsearch.compute.ann; requires org.elasticsearch.xcontent; + // required due to dependency on org.elasticsearch.common.util.concurrent.AbstractAsyncTask requires org.apache.logging.log4j; + requires org.elasticsearch.logging; requires org.elasticsearch.tdigest; exports org.elasticsearch.compute; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java index bd86856ffab04..31f65e9b70053 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/aggregation/blockhash/PackedValuesBlockHash.java @@ -7,8 +7,6 @@ package org.elasticsearch.compute.aggregation.blockhash; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.BytesRefBuilder; import org.elasticsearch.common.unit.ByteSizeValue; @@ -24,6 +22,8 @@ import org.elasticsearch.compute.operator.BatchEncoder; import org.elasticsearch.compute.operator.HashAggregationOperator; import org.elasticsearch.compute.operator.MultivalueDedupe; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import java.util.Arrays; import java.util.List; diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlockBuilder.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlockBuilder.java index 95de2a05e4145..88e7b27adf915 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlockBuilder.java +++ 
b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractBlockBuilder.java @@ -24,6 +24,7 @@ abstract class AbstractBlockBuilder implements Block.Builder { protected boolean positionEntryIsOpen; protected boolean hasNonNullValue; + protected boolean hasMultiValues; protected Block.MvOrdering mvOrdering = Block.MvOrdering.UNORDERED; @@ -70,6 +71,9 @@ public AbstractBlockBuilder beginPositionEntry() { public AbstractBlockBuilder endPositionEntry() { positionCount++; positionEntryIsOpen = false; + if (hasMultiValues == false && valueCount != positionCount) { + hasMultiValues = true; + } return this; } @@ -78,7 +82,7 @@ protected final boolean isDense() { } protected final boolean singleValued() { - return firstValueIndexes == null; + return hasMultiValues == false; } protected final void updatePosition() { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractFilterVector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractFilterVector.java index 03e73224564d9..a74ff44511602 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractFilterVector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/AbstractFilterVector.java @@ -15,7 +15,7 @@ */ abstract class AbstractFilterVector extends AbstractVector { - private final int[] positions; + protected final int[] positions; protected AbstractFilterVector(int[] positions) { super(positions.length); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java index 5f1b961547cd3..f948b647d9c45 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Block.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.Accountable; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -30,7 +31,7 @@ * *
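The hasMultiValues flag introduced above closes a subtle gap: singleValued() used to test firstValueIndexes == null, but that array can be non-null even when every position ends up holding exactly one value, for example when beginPositionEntry is used for single-value entries. Deciding at endPositionEntry time, where valueCount and positionCount can be compared, ties the answer to what was actually appended. A small usage sketch of the builder calls involved (all method names appear in this diff):

import org.elasticsearch.compute.data.IntBlock;

class MultiValueBuilderDemo {
    static IntBlock build() {
        var builder = IntBlock.newBlockBuilder(2);
        builder.appendInt(1);         // position 0: a single value
        builder.beginPositionEntry(); // position 1: two values
        builder.appendInt(2);
        builder.appendInt(3);
        builder.endPositionEntry();   // valueCount (3) != positionCount (2) -> multi-valued
        return builder.build();
    }
}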

Blocks are immutable and can be passed between threads. */ -public interface Block extends NamedWriteable { +public interface Block extends Accountable, NamedWriteable { /** * {@return an efficient dense single-value view of this block}. @@ -176,11 +177,6 @@ static List<NamedWriteableRegistry.Entry> getNamedWriteables() { DoubleBlock.ENTRY, BytesRefBlock.ENTRY, BooleanBlock.ENTRY, - IntVectorBlock.ENTRY, - LongVectorBlock.ENTRY, - DoubleVectorBlock.ENTRY, - BytesRefVectorBlock.ENTRY, - BooleanVectorBlock.ENTRY, ConstantNullBlock.ENTRY ); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockRamUsageEstimator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockRamUsageEstimator.java new file mode 100644 index 0000000000000..3d912a42f2a5e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/BlockRamUsageEstimator.java @@ -0,0 +1,26 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.data; + +import org.apache.lucene.util.RamUsageEstimator; +import org.elasticsearch.core.Nullable; + +import java.util.BitSet; + +public final class BlockRamUsageEstimator { + + /** Returns the size in bytes of the int[] object, or 0 if it is null. */ + public static long sizeOf(@Nullable int[] arr) { + return arr == null ? 0 : RamUsageEstimator.sizeOf(arr); + } + + /** Returns the size in bytes used by the bitset, or 0 if it is null. Not exact, but good enough. */ + public static long sizeOfBitSet(@Nullable BitSet bitset) { + return bitset == null ? 
0 : RamUsageEstimator.shallowSizeOfInstance(BitSet.class) + (bitset.size() / Byte.SIZE); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java index 5d5f65eb7ab1e..f3d26d443d2fa 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/ConstantNullBlock.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -19,6 +20,8 @@ */ public final class ConstantNullBlock extends AbstractBlock { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(ConstantNullBlock.class); + ConstantNullBlock(int positionCount) { super(positionCount); } @@ -93,6 +96,11 @@ public Block expand() { return this; } + @Override + public long ramBytesUsed() { + return BASE_RAM_BYTES_USED; + } + @Override public boolean equals(Object obj) { if (obj instanceof ConstantNullBlock that) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java index 7d14241801352..364a8f413ef0f 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocBlock.java @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.io.stream.StreamOutput; import java.io.IOException; @@ -15,6 +16,9 @@ * Wrapper around {@link DocVector} to make a valid {@link Block}. */ public class DocBlock extends AbstractVectorBlock implements Block { + + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DocBlock.class); + private final DocVector vector; DocBlock(DocVector vector) { @@ -47,6 +51,11 @@ public Block filter(int... positions) { return new DocBlock(asVector().filter(positions)); } + @Override + public long ramBytesUsed() { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector); + } + /** * A builder for {@link DocBlock}. */ diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java index a17ab3d64a706..5227609ec71ee 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/DocVector.java @@ -8,11 +8,15 @@ package org.elasticsearch.compute.data; import org.apache.lucene.util.IntroSorter; +import org.apache.lucene.util.RamUsageEstimator; /** * {@link Vector} where each entry references a Lucene document. */ public class DocVector extends AbstractVector implements Vector { + + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(DocVector.class); + /** * Per position memory cost to build the shard segment doc map required * to load fields out of order. 
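The sizeOfBitSet estimate in BlockRamUsageEstimator above is intentionally rough: BitSet.size() reports the bit capacity of the backing long[] words, so dividing by Byte.SIZE prices the allocated words, while shallowSizeOfInstance covers only the BitSet object shell. A quick worked check of the arithmetic (plain JDK, no assumptions beyond BitSet's documented behavior):

import java.util.BitSet;

class NullsMaskCostDemo {
    public static void main(String[] args) {
        BitSet nulls = new BitSet(1000);
        // BitSet rounds capacity up to whole 64-bit words: 1000 -> 1024 bits.
        System.out.println(nulls.size());             // 1024
        // Word storage as priced by sizeOfBitSet: 1024 / 8 = 128 bytes,
        // plus the shallow size of the BitSet object itself.
        System.out.println(nulls.size() / Byte.SIZE); // 128
    }
}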
@@ -178,4 +182,20 @@ public ElementType elementType() { public boolean isConstant() { return shards.isConstant() && segments.isConstant() && docs.isConstant(); } + + public static long ramBytesEstimated( + IntVector shards, + IntVector segments, + IntVector docs, + int[] shardSegmentDocMapForwards, + int[] shardSegmentDocMapBackwards + ) { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(shards) + RamUsageEstimator.sizeOf(segments) + RamUsageEstimator.sizeOf(docs) + + RamUsageEstimator.shallowSizeOf(shardSegmentDocMapForwards) + RamUsageEstimator.shallowSizeOf(shardSegmentDocMapBackwards); + } + + @Override + public long ramBytesUsed() { + return ramBytesEstimated(shards, segments, docs, shardSegmentDocMapForwards, shardSegmentDocMapBackwards); + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java index 7954834a0debc..06997090ddbb4 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/Vector.java @@ -7,10 +7,12 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.Accountable; + /** * A dense Vector of single values. */ -public interface Vector { +public interface Vector extends Accountable { /** * {@return Returns a Block view over this vector.} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st index 3f4b348185796..83a91a751e1b6 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayBlock.java.st @@ -9,9 +9,12 @@ package org.elasticsearch.compute.data; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.util.BytesRefArray; $else$ +import org.apache.lucene.util.RamUsageEstimator; + import java.util.Arrays; $endif$ import java.util.BitSet; @@ -23,6 +26,8 @@ import java.util.stream.IntStream; */ public final class $Type$ArrayBlock extends AbstractArrayBlock implements $Type$Block { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance($Type$ArrayBlock.class); + $if(BytesRef)$ private final BytesRefArray values; @@ -77,6 +82,16 @@ $endif$ return new $Type$ArrayBlock(values, end, firstValues, shiftNullsToExpandedPositions(), MvOrdering.UNORDERED); } + public static long ramBytesEstimated($if(BytesRef)$BytesRefArray$else$$type$[]$endif$ values, int[] firstValueIndexes, BitSet nullsMask) { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values) + BlockRamUsageEstimator.sizeOf(firstValueIndexes) + + BlockRamUsageEstimator.sizeOfBitSet(nullsMask) + RamUsageEstimator.shallowSizeOfInstance(MvOrdering.class); + } + + @Override + public long ramBytesUsed() { + return ramBytesEstimated(values, firstValueIndexes, nullsMask); + } + @Override public boolean equals(Object obj) { if (obj instanceof $Type$Block that) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st index 4b3f234c05dc6..8b71ea69ab058 100644 --- 
a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ArrayVector.java.st @@ -9,9 +9,12 @@ package org.elasticsearch.compute.data; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.util.BytesRefArray; $else$ +import org.apache.lucene.util.RamUsageEstimator; + import java.util.Arrays; $endif$ @@ -21,6 +24,8 @@ $endif$ */ public final class $Type$ArrayVector extends AbstractVector implements $Type$Vector { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance($Type$ArrayVector.class); + $if(BytesRef)$ private final BytesRefArray values; @@ -70,6 +75,15 @@ $endif$ return new Filter$Type$Vector(this, positions); } + public static long ramBytesEstimated($if(BytesRef)$BytesRefArray$else$$type$[]$endif$ values) { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values); + } + + @Override + public long ramBytesUsed() { + return ramBytesEstimated(values); + } + @Override public boolean equals(Object obj) { if (obj instanceof $Type$Vector that) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st index 09566bed63dc3..153cf3c039145 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-BigArrayVector.java.st @@ -7,6 +7,7 @@ package org.elasticsearch.compute.data; +import org.apache.lucene.util.RamUsageEstimator; import org.elasticsearch.common.util.$if(boolean)$Bit$else$$Type$$endif$Array; import org.elasticsearch.core.Releasable; @@ -16,6 +17,8 @@ import org.elasticsearch.core.Releasable; */ public final class $Type$BigArrayVector extends AbstractVector implements $Type$Vector, Releasable { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance($Type$BigArrayVector.class); + private final $if(boolean)$Bit$else$$Type$$endif$Array values; public $Type$BigArrayVector($if(boolean)$Bit$else$$Type$$endif$Array values, int positionCount) { @@ -43,6 +46,11 @@ public final class $Type$BigArrayVector extends AbstractVector implements $Type$ return false; } + @Override + public long ramBytesUsed() { + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(values); + } + @Override public $Type$Vector filter(int... 
positions) { return new Filter$Type$Vector(this, positions); diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st index 61fa0ddd26de3..b66ece387f6f0 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Block.java.st @@ -56,6 +56,10 @@ $endif$ } static $Type$Block of(StreamInput in) throws IOException { + final boolean isVector = in.readBoolean(); + if (isVector) { + return $Type$Vector.of(in).asBlock(); + } final int positions = in.readVInt(); var builder = newBlockBuilder(positions); for (int i = 0; i < positions; i++) { @@ -75,21 +79,27 @@ $endif$ @Override default void writeTo(StreamOutput out) throws IOException { - final int positions = getPositionCount(); - out.writeVInt(positions); - for (int pos = 0; pos < positions; pos++) { - if (isNull(pos)) { - out.writeBoolean(true); - } else { - out.writeBoolean(false); - final int valueCount = getValueCount(pos); - out.writeVInt(valueCount); - for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { -$if(BytesRef)$ - out.write$Type$(get$Type$(getFirstValueIndex(pos) + valueIndex, new BytesRef())); -$else$ - out.write$Type$(get$Type$(getFirstValueIndex(pos) + valueIndex)); -$endif$ + $Type$Vector vector = asVector(); + out.writeBoolean(vector != null); + if (vector != null) { + vector.writeTo(out); + } else { + final int positions = getPositionCount(); + out.writeVInt(positions); + for (int pos = 0; pos < positions; pos++) { + if (isNull(pos)) { + out.writeBoolean(true); + } else { + out.writeBoolean(false); + final int valueCount = getValueCount(pos); + out.writeVInt(valueCount); + for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { + $if(BytesRef)$ + out.write$Type$(get$Type$(getFirstValueIndex(pos) + valueIndex, new BytesRef())); + $else$ + out.write$Type$(get$Type$(getFirstValueIndex(pos) + valueIndex)); + $endif$ + } } } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st index 3915c0c0f7fbc..75a77da220435 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-ConstantVector.java.st @@ -10,6 +10,7 @@ package org.elasticsearch.compute.data; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; $endif$ +import org.apache.lucene.util.RamUsageEstimator; /** * Vector implementation that stores a constant $type$ value. 
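Constant vectors are the cheapest case of the writeTo template above: one vInt count, one flag, one value, regardless of how many positions the vector logically spans. A sketch for the int case (ConstantIntVector is package-private, so code like this would sit in org.elasticsearch.compute.data, e.g. in a test; the byte math assumes the usual vInt encoding):

import org.elasticsearch.common.io.stream.BytesStreamOutput;

import java.io.IOException;

class ConstantEncodingDemo {
    static long encodedBytes() throws IOException {
        IntVector constant = new ConstantIntVector(42, 1_000_000);
        BytesStreamOutput out = new BytesStreamOutput();
        constant.writeTo(out);
        // vInt(1_000_000) is 3 bytes + 1 boolean + 4 bytes for the int:
        // about 8 bytes total, independent of the million logical positions.
        return out.bytes().length();
    }
}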
@@ -17,6 +18,8 @@ $endif$ */ public final class Constant$Type$Vector extends AbstractVector implements $Type$Vector { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Constant$Type$Vector.class); + private final $type$ value; public Constant$Type$Vector($type$ value, int positionCount) { @@ -53,6 +56,11 @@ $endif$ return true; } + @Override + public long ramBytesUsed() { + return BASE_RAM_BYTES_USED + RamUsageEstimator.shallowSizeOfInstance($type$.class); + } + @Override public boolean equals(Object obj) { if (obj instanceof $Type$Vector that) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-FilterBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-FilterBlock.java.st index 097dfef0c6864..3dfaf02dc7c99 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-FilterBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-FilterBlock.java.st @@ -10,6 +10,7 @@ package org.elasticsearch.compute.data; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; $endif$ +import org.apache.lucene.util.RamUsageEstimator; /** * Filter block for $Type$Blocks. @@ -17,6 +18,8 @@ $endif$ */ final class Filter$Type$Block extends AbstractFilterBlock implements $Type$Block { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Filter$Type$Block.class); + private final $Type$Block block; Filter$Type$Block($Type$Block block, int... positions) { @@ -82,6 +85,13 @@ $endif$ return builder.build(); } + @Override + public long ramBytesUsed() { + // from a usage and resource point of view filter blocks encapsulate + // their inner block, rather than listing it as a child resource + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(block) + RamUsageEstimator.sizeOf(positions); + } + @Override public boolean equals(Object obj) { if (obj instanceof $Type$Block that) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-FilterVector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-FilterVector.java.st index 0f7c69805f406..bf7c7b399aa76 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-FilterVector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-FilterVector.java.st @@ -10,6 +10,7 @@ package org.elasticsearch.compute.data; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; $endif$ +import org.apache.lucene.util.RamUsageEstimator; /** * Filter vector for $Type$Vectors. @@ -17,6 +18,8 @@ $endif$ */ public final class Filter$Type$Vector extends AbstractFilterVector implements $Type$Vector { + private static final long BASE_RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance(Filter$Type$Vector.class); + private final $Type$Vector vector; Filter$Type$Vector($Type$Vector vector, int... 
positions) { @@ -54,6 +57,13 @@ $endif$ return new Filter$Type$Vector(this, positions); } + @Override + public long ramBytesUsed() { + // from a usage and resource point of view filter vectors encapsulate + // their inner vector, rather than listing it as a child resource + return BASE_RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector) + RamUsageEstimator.sizeOf(positions); + } + @Override public boolean equals(Object obj) { if (obj instanceof $Type$Vector that) { diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st index 85cc558b3f5f3..f48ad43faefc8 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-Vector.java.st @@ -10,6 +10,10 @@ package org.elasticsearch.compute.data; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; $endif$ +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; + +import java.io.IOException; /** * Vector that stores $type$ values. @@ -101,6 +105,43 @@ $endif$ return result; } + /** Deserializes a Vector from the given stream input. */ + static $Type$Vector of(StreamInput in) throws IOException { + final int positions = in.readVInt(); + final boolean constant = in.readBoolean(); + if (constant && positions > 0) { + return new Constant$Type$Vector(in.read$Type$(), positions); + } else { + var builder = $Type$Vector.newVectorBuilder(positions); + for (int i = 0; i < positions; i++) { + builder.append$Type$(in.read$Type$()); + } + return builder.build(); + } + } + + /** Serializes this Vector to the given stream output. */ + default void writeTo(StreamOutput out) throws IOException { + final int positions = getPositionCount(); + out.writeVInt(positions); + out.writeBoolean(isConstant()); + if (isConstant() && positions > 0) { +$if(BytesRef)$ + out.write$Type$(get$Type$(0, new BytesRef())); +$else$ + out.write$Type$(get$Type$(0)); +$endif$ + } else { + for (int i = 0; i < positions; i++) { +$if(BytesRef)$ + out.write$Type$(get$Type$(i, new BytesRef())); +$else$ + out.write$Type$(get$Type$(i)); +$endif$ + } + } + } + static Builder newVectorBuilder(int estimatedSize) { return new $Type$VectorBuilder(estimatedSize); } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st index d323465ebb1a0..c6c8df46ba4e8 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/data/X-VectorBlock.java.st @@ -9,13 +9,8 @@ package org.elasticsearch.compute.data; $if(BytesRef)$ import org.apache.lucene.util.BytesRef; -$else$ $endif$ -import org.elasticsearch.common.io.stream.NamedWriteableRegistry; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; - -import java.io.IOException; +import org.apache.lucene.util.RamUsageEstimator; /** * Block view of a $Type$Vector. 
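These .st files are string templates: the build substitutes $Type$ and $type$ per element type, and the $if(BytesRef)$ branches exist because BytesRef getters take a caller-supplied scratch object to avoid per-call allocation. For clarity, the writeTo loop in the X-Vector template expands for the BytesRef specialization to roughly the following (a mechanical expansion for illustration, not copied from the repository):

for (int i = 0; i < positions; i++) {
    // write$Type$/get$Type$ become writeBytesRef/getBytesRef, and the scratch
    // BytesRef lets the getter fill or reuse a buffer instead of allocating.
    out.writeBytesRef(getBytesRef(i, new BytesRef()));
}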
@@ -23,6 +18,8 @@ import java.io.IOException; */ public final class $Type$VectorBlock extends AbstractVectorBlock implements $Type$Block { + private static final long RAM_BYTES_USED = RamUsageEstimator.shallowSizeOfInstance($Type$VectorBlock.class); + private final $Type$Vector vector; $Type$VectorBlock($Type$Vector vector) { @@ -60,52 +57,9 @@ $endif$ return new Filter$Type$Vector(vector, positions).asBlock(); } - public static final NamedWriteableRegistry.Entry ENTRY = new NamedWriteableRegistry.Entry( - Block.class, - "$Type$VectorBlock", - $Type$VectorBlock::of - ); - @Override - public String getWriteableName() { - return "$Type$VectorBlock"; - } - - static $Type$VectorBlock of(StreamInput in) throws IOException { - final int positions = in.readVInt(); - final boolean constant = in.readBoolean(); - if (constant && positions > 0) { - return new $Type$VectorBlock(new Constant$Type$Vector(in.read$Type$(), positions)); - } else { - var builder = $Type$Vector.newVectorBuilder(positions); - for (int i = 0; i < positions; i++) { - builder.append$Type$(in.read$Type$()); - } - return new $Type$VectorBlock(builder.build()); - } - } - - @Override - public void writeTo(StreamOutput out) throws IOException { - final $Type$Vector vector = this.vector; - final int positions = vector.getPositionCount(); - out.writeVInt(positions); - out.writeBoolean(vector.isConstant()); - if (vector.isConstant() && positions > 0) { -$if(BytesRef)$ - out.write$Type$(get$Type$(0, new BytesRef())); -$else$ - out.write$Type$(get$Type$(0)); -$endif$ - } else { - for (int i = 0; i < positions; i++) { -$if(BytesRef)$ - out.write$Type$(get$Type$(i, new BytesRef())); -$else$ - out.write$Type$(get$Type$(i)); -$endif$ - } - } + public long ramBytesUsed() { + return RAM_BYTES_USED + RamUsageEstimator.sizeOf(vector); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java index 35ccb7daca1a4..e0b25469d5ab2 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneOperator.java @@ -7,328 +7,156 @@ package org.elasticsearch.compute.lucene; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.BulkScorer; import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.DocIdSetIterator; +import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Weight; +import org.apache.lucene.util.Bits; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.compute.operator.DriverContext; import org.elasticsearch.compute.operator.Operator; import org.elasticsearch.compute.operator.SourceOperator; -import org.elasticsearch.core.Nullable; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.xcontent.XContentBuilder; import java.io.IOException; import java.io.UncheckedIOException; -import java.util.ArrayList; -import java.util.Iterator; -import java.util.List; import java.util.Objects; -import 
java.util.Spliterator; -import java.util.Spliterators; import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.StreamSupport; public abstract class LuceneOperator extends SourceOperator { public static final int NO_LIMIT = Integer.MAX_VALUE; - private static final int MAX_DOCS_PER_SLICE = 250_000; // copied from IndexSearcher - private static final int MAX_SEGMENTS_PER_SLICE = 5; // copied from IndexSearcher - - @Nullable - final IndexReader indexReader; - final int shardId; - @Nullable - final Query query; - final List leaves; + private int processSlices; final int maxPageSize; - final int minPageSize; + private final LuceneSliceQueue sliceQueue; - Weight weight; + private LuceneSlice currentSlice; + private int sliceIndex; - int currentLeaf = 0; - LuceneSourceOperator.PartialLeafReaderContext currentLeafReaderContext = null; - BulkScorer currentScorer = null; - private Thread createdScorerThread = null; + private LuceneScorer currentScorer; - int currentPagePos; - int currentScorerPos; int pagesEmitted; + boolean doneCollecting; - LuceneOperator(IndexReader reader, int shardId, Query query, int maxPageSize) { - this.indexReader = reader; - this.shardId = shardId; - this.leaves = reader.leaves().stream().map(PartialLeafReaderContext::new).collect(Collectors.toList()); - this.query = query; + public LuceneOperator(int maxPageSize, LuceneSliceQueue sliceQueue) { this.maxPageSize = maxPageSize; - this.minPageSize = Math.max(1, maxPageSize / 2); + this.sliceQueue = sliceQueue; } - LuceneOperator(Weight weight, int shardId, List leaves, int maxPageSize) { - this.indexReader = null; - this.shardId = shardId; - this.leaves = leaves; - this.query = null; - this.weight = weight; - this.maxPageSize = maxPageSize; - this.minPageSize = maxPageSize / 2; + public interface Factory extends SourceOperator.SourceOperatorFactory { + int taskConcurrency(); } - abstract LuceneOperator docSliceLuceneOperator(List slice); - - abstract LuceneOperator segmentSliceLuceneOperator(IndexSearcher.LeafSlice leafSlice); - - public abstract static class LuceneOperatorFactory implements SourceOperatorFactory { - - final Function queryFunction; - - final DataPartitioning dataPartitioning; - - final int maxPageSize; - - final List searchContexts; - - final int taskConcurrency; - - final int limit; - - private Iterator iterator; - - public LuceneOperatorFactory( - List searchContexts, - Function queryFunction, - DataPartitioning dataPartitioning, - int taskConcurrency, - int maxPageSize, - int limit - ) { - this.searchContexts = searchContexts; - this.queryFunction = queryFunction; - this.dataPartitioning = dataPartitioning; - this.taskConcurrency = taskConcurrency; - this.maxPageSize = maxPageSize; - this.limit = limit; - } - - abstract LuceneOperator luceneOperatorForShard(int shardIndex); - - Iterator sourceOperatorIterator() { - final List luceneOperators = new ArrayList<>(); - for (int shardIndex = 0; shardIndex < searchContexts.size(); shardIndex++) { - LuceneOperator queryOperator = luceneOperatorForShard(shardIndex); - switch (dataPartitioning) { - case SHARD -> luceneOperators.add(queryOperator); - case SEGMENT -> luceneOperators.addAll(queryOperator.segmentSlice()); - case DOC -> luceneOperators.addAll(queryOperator.docSlice(taskConcurrency)); - default -> throw new UnsupportedOperationException(); - } - } - return luceneOperators.iterator(); - } - - @Override - public final SourceOperator get(DriverContext driverContext) { - if (iterator == null) { - iterator = 
sourceOperatorIterator(); - } - if (iterator.hasNext()) { - return iterator.next(); - } else { - throw new IllegalStateException("Lucene operator factory exhausted"); - } - } - - public int size() { - return Math.toIntExact( - StreamSupport.stream(Spliterators.spliteratorUnknownSize(sourceOperatorIterator(), Spliterator.ORDERED), false).count() - ); - } - - public int maxPageSize() { - return maxPageSize; - } - - public int limit() { - return limit; - } - } - - /** - * Split this source operator into a given number of slices - */ - public List docSlice(int numSlices) { - if (weight != null) { - throw new IllegalStateException("can only call slice method once"); - } - initializeWeightIfNecessary(); + @Override + public void close() { - List operators = new ArrayList<>(); - for (List slice : docSlices(indexReader, numSlices)) { - operators.add(docSliceLuceneOperator(slice)); - } - return operators; } - static final List> docSlices(IndexReader indexReader, int numSlices) { - final int totalDocCount = indexReader.maxDoc(); - final int normalMaxDocsPerSlice = totalDocCount / numSlices; - final int extraDocsInFirstSlice = totalDocCount % numSlices; - final List> slices = new ArrayList<>(); - int docsAllocatedInCurrentSlice = 0; - List currentSlice = null; - int maxDocsPerSlice = normalMaxDocsPerSlice + extraDocsInFirstSlice; - for (LeafReaderContext ctx : indexReader.leaves()) { - final int numDocsInLeaf = ctx.reader().maxDoc(); - int minDoc = 0; - while (minDoc < numDocsInLeaf) { - int numDocsToUse = Math.min(maxDocsPerSlice - docsAllocatedInCurrentSlice, numDocsInLeaf - minDoc); - if (numDocsToUse <= 0) { - break; - } + LuceneScorer getCurrentOrLoadNextScorer() { + while (currentScorer == null || currentScorer.isDone()) { + if (currentSlice == null || sliceIndex >= currentSlice.numLeaves()) { + sliceIndex = 0; + currentSlice = sliceQueue.nextSlice(); if (currentSlice == null) { - currentSlice = new ArrayList<>(); + doneCollecting = true; + return null; + } else { + processSlices++; } - currentSlice.add(new PartialLeafReaderContext(ctx, minDoc, minDoc + numDocsToUse)); - minDoc += numDocsToUse; - docsAllocatedInCurrentSlice += numDocsToUse; - if (docsAllocatedInCurrentSlice == maxDocsPerSlice) { - slices.add(currentSlice); - maxDocsPerSlice = normalMaxDocsPerSlice; // once the first slice with the extra docs is added, no need for extra docs - currentSlice = null; - docsAllocatedInCurrentSlice = 0; + if (currentSlice.numLeaves() == 0) { + continue; } } + final PartialLeafReaderContext partialLeaf = currentSlice.getLeaf(sliceIndex++); + final LeafReaderContext leaf = partialLeaf.leafReaderContext; + if (currentScorer == null || currentScorer.leafReaderContext() != leaf) { + final Weight weight = currentSlice.weight().get(); + currentScorer = new LuceneScorer(currentSlice.shardIndex(), currentSlice.searchContext(), weight, leaf); + } + assert currentScorer.maxPosition <= partialLeaf.maxDoc : currentScorer.maxPosition + ">" + partialLeaf.maxDoc; + currentScorer.maxPosition = partialLeaf.maxDoc; + currentScorer.position = Math.max(currentScorer.position, partialLeaf.minDoc); } - if (currentSlice != null) { - slices.add(currentSlice); - } - if (numSlices < totalDocCount && slices.size() != numSlices) { - throw new IllegalStateException("wrong number of slices, expected " + numSlices + " but got " + slices.size()); - } - if (slices.stream() - .flatMapToInt( - l -> l.stream().mapToInt(partialLeafReaderContext -> partialLeafReaderContext.maxDoc - partialLeafReaderContext.minDoc) - ) - .sum() != 
totalDocCount) { - throw new IllegalStateException("wrong doc count"); + if (Thread.currentThread() != currentScorer.executingThread) { + currentScorer.reinitialize(); } - return slices; + return currentScorer; } /** - * Uses Lucene's own slicing method, which creates per-segment level slices + * Wraps a {@link BulkScorer} with shard information */ - public List segmentSlice() { - if (weight != null) { - throw new IllegalStateException("can only call slice method once"); - } - initializeWeightIfNecessary(); - List operators = new ArrayList<>(); - for (IndexSearcher.LeafSlice leafSlice : segmentSlices(indexReader)) { - operators.add(segmentSliceLuceneOperator(leafSlice)); + static final class LuceneScorer { + private final int shardIndex; + private final SearchContext searchContext; + private final Weight weight; + private final LeafReaderContext leafReaderContext; + + private BulkScorer bulkScorer; + private int position; + private int maxPosition; + private Thread executingThread; + + LuceneScorer(int shardIndex, SearchContext searchContext, Weight weight, LeafReaderContext leafReaderContext) { + this.shardIndex = shardIndex; + this.searchContext = searchContext; + this.weight = weight; + this.leafReaderContext = leafReaderContext; + reinitialize(); } - return operators; - } - static IndexSearcher.LeafSlice[] segmentSlices(IndexReader indexReader) { - return IndexSearcher.slices(indexReader.leaves(), MAX_DOCS_PER_SLICE, MAX_SEGMENTS_PER_SLICE); - } - - @Override - public void finish() { - throw new UnsupportedOperationException(); - } - - void initializeWeightIfNecessary() { - if (weight == null) { + private void reinitialize() { + this.executingThread = Thread.currentThread(); try { - IndexSearcher indexSearcher = new IndexSearcher(indexReader); - weight = indexSearcher.createWeight(indexSearcher.rewrite(new ConstantScoreQuery(query)), ScoreMode.COMPLETE_NO_SCORES, 1); + this.bulkScorer = weight.bulkScorer(leafReaderContext); } catch (IOException e) { throw new UncheckedIOException(e); } } - } - boolean maybeReturnEarlyOrInitializeScorer() { - // Reset the Scorer if the operator is run by a different thread - if (currentLeafReaderContext != null && createdScorerThread != Thread.currentThread()) { - try { - currentScorer = weight.bulkScorer(currentLeafReaderContext.leafReaderContext); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - createdScorerThread = Thread.currentThread(); - return false; + void scoreNextRange(LeafCollector collector, Bits acceptDocs, int numDocs) throws IOException { + assert isDone() == false : "scorer is exhausted"; + position = bulkScorer.score(collector, acceptDocs, position, Math.min(maxPosition, position + numDocs)); } - if (currentLeafReaderContext == null) { - assert currentScorer == null : "currentScorer wasn't reset"; - do { - currentLeafReaderContext = leaves.get(currentLeaf); - currentScorerPos = currentLeafReaderContext.minDoc; - try { - currentScorer = weight.bulkScorer(currentLeafReaderContext.leafReaderContext); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - if (currentScorer == null) { - // doesn't match anything; move to the next leaf or abort if finished - currentLeaf++; - if (doneCollecting()) { - return true; - } - } - } while (currentScorer == null); - createdScorerThread = Thread.currentThread(); + + LeafReaderContext leafReaderContext() { + return leafReaderContext; } - return false; - } - protected abstract boolean doneCollecting(); + boolean isDone() { + return bulkScorer == null || position >= 
maxPosition; + } - @Override - public void close() { + void markAsDone() { + position = DocIdSetIterator.NO_MORE_DOCS; + } + int shardIndex() { + return shardIndex; + } + + SearchContext searchContext() { + return searchContext; + } } @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append(this.getClass().getSimpleName()).append("["); - sb.append("shardId=").append(shardId); sb.append(", maxPageSize=").append(maxPageSize); sb.append("]"); return sb.toString(); } - static class PartialLeafReaderContext { - - final LeafReaderContext leafReaderContext; - final int minDoc; // incl - final int maxDoc; // excl - - PartialLeafReaderContext(LeafReaderContext leafReaderContext, int minDoc, int maxDoc) { - this.leafReaderContext = leafReaderContext; - this.minDoc = minDoc; - this.maxDoc = maxDoc; - } - - PartialLeafReaderContext(LeafReaderContext leafReaderContext) { - this(leafReaderContext, 0, leafReaderContext.reader().maxDoc()); - } - - } - @Override public Operator.Status status() { return new Status(this); @@ -341,43 +169,51 @@ public static class Status implements Operator.Status { Status::new ); - private final int currentLeaf; - private final int totalLeaves; + private final int processedSlices; + private final int totalSlices; private final int pagesEmitted; - private final int leafPosition; - private final int leafSize; + private final int slicePosition; + private final int sliceSize; private Status(LuceneOperator operator) { - currentLeaf = operator.currentLeaf; - totalLeaves = operator.leaves.size(); - leafPosition = operator.currentScorerPos; - LuceneOperator.PartialLeafReaderContext ctx = operator.currentLeafReaderContext; - leafSize = ctx == null ? 0 : ctx.maxDoc - ctx.minDoc; + processedSlices = operator.processSlices; + totalSlices = operator.sliceQueue.totalSlices(); + LuceneSlice slice = operator.currentSlice; + final PartialLeafReaderContext leaf; + int sliceIndex = operator.sliceIndex; + if (slice != null && sliceIndex < slice.numLeaves()) { + leaf = slice.getLeaf(sliceIndex); + } else { + leaf = null; + } + LuceneScorer scorer = operator.currentScorer; + slicePosition = scorer != null ? scorer.position : 0; + sliceSize = leaf != null ? 
leaf.maxDoc - leaf.minDoc : 0; pagesEmitted = operator.pagesEmitted; } - Status(int currentLeaf, int totalLeaves, int pagesEmitted, int leafPosition, int leafSize) { - this.currentLeaf = currentLeaf; - this.totalLeaves = totalLeaves; - this.leafPosition = leafPosition; - this.leafSize = leafSize; + Status(int processedSlices, int totalSlices, int pagesEmitted, int slicePosition, int sliceSize) { + this.processedSlices = processedSlices; + this.totalSlices = totalSlices; + this.slicePosition = slicePosition; + this.sliceSize = sliceSize; this.pagesEmitted = pagesEmitted; } Status(StreamInput in) throws IOException { - currentLeaf = in.readVInt(); - totalLeaves = in.readVInt(); - leafPosition = in.readVInt(); - leafSize = in.readVInt(); + processedSlices = in.readVInt(); + totalSlices = in.readVInt(); + slicePosition = in.readVInt(); + sliceSize = in.readVInt(); pagesEmitted = in.readVInt(); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeVInt(currentLeaf); - out.writeVInt(totalLeaves); - out.writeVInt(leafPosition); - out.writeVInt(leafSize); + out.writeVInt(processedSlices); + out.writeVInt(totalSlices); + out.writeVInt(slicePosition); + out.writeVInt(sliceSize); out.writeVInt(pagesEmitted); } @@ -387,11 +223,11 @@ public String getWriteableName() { } public int currentLeaf() { - return currentLeaf; + return processedSlices; } public int totalLeaves() { - return totalLeaves; + return totalSlices; } public int pagesEmitted() { @@ -399,20 +235,20 @@ public int pagesEmitted() { } public int leafPosition() { - return leafPosition; + return slicePosition; } public int leafSize() { - return leafSize; + return sliceSize; } @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); - builder.field("current_leaf", currentLeaf); - builder.field("total_leaves", totalLeaves); - builder.field("leaf_position", leafPosition); - builder.field("leaf_size", leafSize); + builder.field("processed_slices", processedSlices); + builder.field("total_slices", totalSlices); + builder.field("slice_position", slicePosition); + builder.field("slice_size", sliceSize); builder.field("pages_emitted", pagesEmitted); return builder.endObject(); } @@ -422,16 +258,16 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Status status = (Status) o; - return currentLeaf == status.currentLeaf - && totalLeaves == status.totalLeaves + return processedSlices == status.processedSlices + && totalSlices == status.totalSlices && pagesEmitted == status.pagesEmitted - && leafPosition == status.leafPosition - && leafSize == status.leafSize; + && slicePosition == status.slicePosition + && sliceSize == status.sliceSize; } @Override public int hashCode() { - return Objects.hash(currentLeaf, totalLeaves, pagesEmitted, leafPosition, leafSize); + return Objects.hash(processedSlices, totalSlices, pagesEmitted, slicePosition, sliceSize); } @Override @@ -439,4 +275,16 @@ public String toString() { return Strings.toString(this); } } + + static Function<SearchContext, Weight> weightFunction(Function<SearchContext, Query> queryFunction, ScoreMode scoreMode) { + return ctx -> { + final var query = queryFunction.apply(ctx); + final var searcher = ctx.searcher(); + try { + return searcher.createWeight(searcher.rewrite(new ConstantScoreQuery(query)), scoreMode, 1); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + }; + } }
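The LuceneOperator refactor above replaces eager per-shard operator fan-out with cooperative polling: each operator repeatedly asks a shared slice queue for the next slice and scores it in bounded ranges, so drivers that finish cheap slices simply pull more work. A minimal, runnable sketch of that shape in plain Java; Slice, the chunk size, and the queue contents are illustrative stand-ins, not the ESQL classes:

import java.util.List;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

public class SliceQueueSketch {
    record Slice(int minDoc, int maxDoc) {}

    public static void main(String[] args) throws InterruptedException {
        // Deliberately uneven slices: static assignment would leave drivers idle.
        Queue<Slice> queue = new ConcurrentLinkedQueue<>(
            List.of(new Slice(0, 250_000), new Slice(250_000, 260_000), new Slice(260_000, 900_000))
        );
        AtomicLong processed = new AtomicLong();
        int drivers = 4;
        ExecutorService pool = Executors.newFixedThreadPool(drivers);
        for (int i = 0; i < drivers; i++) {
            pool.execute(() -> {
                Slice slice;
                while ((slice = queue.poll()) != null) {      // like LuceneSliceQueue.nextSlice()
                    int position = slice.minDoc();
                    while (position < slice.maxDoc()) {       // like LuceneScorer.scoreNextRange()
                        int next = Math.min(slice.maxDoc(), position + 16_384);
                        processed.addAndGet(next - position); // stand-in for collecting docs
                        position = next;
                    }
                }
            });
        }
        pool.shutdown();
        pool.awaitTermination(1, TimeUnit.MINUTES);
        System.out.println("processed " + processed.get() + " docs"); // prints 900000
    }
}

Because the queue is drained with poll(), load balances itself across drivers; the diff applies the same idea with a ConcurrentLinkedQueue inside LuceneSliceQueue below.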
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSlice.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSlice.java new file mode 100644 index 0000000000000..c3fe03ae88bb3 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSlice.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.search.Weight; +import org.elasticsearch.search.internal.SearchContext; + +import java.util.List; +import java.util.function.Supplier; + +/** + * Holds a list of multiple partial Lucene segments + */ +public record LuceneSlice(int shardIndex, SearchContext searchContext, List<PartialLeafReaderContext> leaves, Supplier<Weight> weight) { + + int numLeaves() { + return leaves.size(); + } + + PartialLeafReaderContext getLeaf(int index) { + return leaves.get(index); + } +}
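LuceneSlice hands out its Weight through a Supplier rather than holding the Weight directly, so the queue in the next diff can build one Weight per shard lazily and share it across all of that shard's slices (and, per the comment in that diff, build it eagerly only when a shard yields more than one slice). A sketch of that one-element cache in isolation; MemoizingSupplier and the demo values are illustrative, and the diff itself uses a plain one-element array with no synchronization:

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;

public class MemoizingSupplierSketch {
    static final class MemoizingSupplier<T> implements Supplier<T> {
        private final Supplier<T> delegate;
        private final AtomicReference<T> cached = new AtomicReference<>();

        MemoizingSupplier(Supplier<T> delegate) {
            this.delegate = delegate;
        }

        @Override
        public T get() {
            T value = cached.get();
            if (value == null) {
                // Not strictly once-only under races (the one-element array in
                // the diff is also unsynchronized), but callers always observe
                // a fully built value.
                cached.compareAndSet(null, delegate.get());
                value = cached.get();
            }
            return value;
        }
    }

    public static void main(String[] args) {
        AtomicInteger builds = new AtomicInteger();
        Supplier<String> weight = new MemoizingSupplier<>(() -> "weight#" + builds.incrementAndGet());
        System.out.println(weight.get()); // weight#1
        System.out.println(weight.get()); // weight#1 again: built once, reused by every slice
    }
}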
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSliceQueue.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSliceQueue.java new file mode 100644 index 0000000000000..7d96416649636 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSliceQueue.java @@ -0,0 +1,132 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.Weight; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.search.internal.SearchContext; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Queue; +import java.util.concurrent.ConcurrentLinkedQueue; +import java.util.function.Function; +import java.util.function.Supplier; + +/** + * Shared Lucene slices between Lucene operators. + */ +public final class LuceneSliceQueue { + private static final int MAX_DOCS_PER_SLICE = 250_000; // copied from IndexSearcher + private static final int MAX_SEGMENTS_PER_SLICE = 5; // copied from IndexSearcher + + private final int totalSlices; + private final Queue<LuceneSlice> slices; + + private LuceneSliceQueue(List<LuceneSlice> slices) { + this.totalSlices = slices.size(); + this.slices = new ConcurrentLinkedQueue<>(slices); + } + + @Nullable + public LuceneSlice nextSlice() { + return slices.poll(); + } + + public int totalSlices() { + return totalSlices; + } + + public static LuceneSliceQueue create( + List<SearchContext> searchContexts, + Function<SearchContext, Weight> weightFunction, + DataPartitioning dataPartitioning, + int taskConcurrency + ) { + final List<LuceneSlice> slices = new ArrayList<>(); + for (int shardIndex = 0; shardIndex < searchContexts.size(); shardIndex++) { + final SearchContext searchContext = searchContexts.get(shardIndex); + final List<LeafReaderContext> leafContexts = searchContext.searcher().getLeafContexts(); + List<List<PartialLeafReaderContext>> groups = switch (dataPartitioning) { + case SHARD -> Collections.singletonList(leafContexts.stream().map(PartialLeafReaderContext::new).toList()); + case SEGMENT -> segmentSlices(leafContexts); + case DOC -> docSlices(searchContext.searcher().getIndexReader(), taskConcurrency); + }; + final Weight[] cachedWeight = new Weight[1]; + final Supplier<Weight> weight = () -> { + if (cachedWeight[0] == null) { + cachedWeight[0] = weightFunction.apply(searchContext); + } + return cachedWeight[0]; + }; + if (groups.size() > 1) { + weight.get(); // eagerly build Weight once + } + for (List<PartialLeafReaderContext> group : groups) { + slices.add(new LuceneSlice(shardIndex, searchContext, group, weight)); + } + } + return new LuceneSliceQueue(slices); + } + + static List<List<PartialLeafReaderContext>> docSlices(IndexReader indexReader, int numSlices) { + final int totalDocCount = indexReader.maxDoc(); + final int normalMaxDocsPerSlice = totalDocCount / numSlices; + final int extraDocsInFirstSlice = totalDocCount % numSlices; + final List<List<PartialLeafReaderContext>> slices = new ArrayList<>(); + int docsAllocatedInCurrentSlice = 0; + List<PartialLeafReaderContext> currentSlice = null; + int maxDocsPerSlice = normalMaxDocsPerSlice + extraDocsInFirstSlice; + for (LeafReaderContext ctx : indexReader.leaves()) { + final int numDocsInLeaf = ctx.reader().maxDoc(); + int minDoc = 0; + while (minDoc < numDocsInLeaf) { + int numDocsToUse = Math.min(maxDocsPerSlice - docsAllocatedInCurrentSlice, numDocsInLeaf - minDoc); + if (numDocsToUse <= 0) { + break; + } + if (currentSlice == null) { + currentSlice = new ArrayList<>(); + } + currentSlice.add(new PartialLeafReaderContext(ctx, minDoc, minDoc + numDocsToUse)); + minDoc += numDocsToUse; + docsAllocatedInCurrentSlice += numDocsToUse; + if (docsAllocatedInCurrentSlice == maxDocsPerSlice) { + slices.add(currentSlice); + maxDocsPerSlice = normalMaxDocsPerSlice; // once the first slice with the extra docs is added, no need for extra docs + currentSlice = null; + docsAllocatedInCurrentSlice = 0; + } + } + } + if (currentSlice != null) { + slices.add(currentSlice); + } + if (numSlices < totalDocCount && slices.size() != numSlices) { + throw new IllegalStateException("wrong number of slices, expected " + numSlices + " but got " + slices.size()); + } + if (slices.stream() + .flatMapToInt( + l -> l.stream().mapToInt(partialLeafReaderContext -> partialLeafReaderContext.maxDoc - partialLeafReaderContext.minDoc) + ) + .sum() != totalDocCount) { + throw new IllegalStateException("wrong doc count"); + } + return slices; + } + + static List<List<PartialLeafReaderContext>> segmentSlices(List<LeafReaderContext> leafContexts) { + IndexSearcher.LeafSlice[] gs = IndexSearcher.slices(leafContexts, MAX_DOCS_PER_SLICE, MAX_SEGMENTS_PER_SLICE); + return Arrays.stream(gs).map(g -> Arrays.stream(g.leaves).map(PartialLeafReaderContext::new).toList()).toList(); + } +}
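To make the docSlices arithmetic above concrete: splitting 10 documents spread over segments of 6 and 4 docs into 3 slices gives normalMaxDocsPerSlice = 3 and extraDocsInFirstSlice = 1, so the first slice takes 4 docs and the remaining two take 3 each, and a slice may straddle a segment boundary. A standalone rendering of the same loop over plain int ranges; Range and the demo main are illustrative, and the consistency checks are dropped for brevity:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class DocSlicesSketch {
    record Range(int segment, int minDoc, int maxDoc) {}

    static List<List<Range>> docSlices(int[] segmentSizes, int numSlices) {
        int totalDocCount = Arrays.stream(segmentSizes).sum();
        int normalMaxDocsPerSlice = totalDocCount / numSlices;
        int extraDocsInFirstSlice = totalDocCount % numSlices;
        List<List<Range>> slices = new ArrayList<>();
        int docsAllocatedInCurrentSlice = 0;
        List<Range> currentSlice = null;
        int maxDocsPerSlice = normalMaxDocsPerSlice + extraDocsInFirstSlice;
        for (int segment = 0; segment < segmentSizes.length; segment++) {
            int minDoc = 0;
            while (minDoc < segmentSizes[segment]) {
                int numDocsToUse = Math.min(maxDocsPerSlice - docsAllocatedInCurrentSlice, segmentSizes[segment] - minDoc);
                if (numDocsToUse <= 0) {
                    break;
                }
                if (currentSlice == null) {
                    currentSlice = new ArrayList<>();
                }
                currentSlice.add(new Range(segment, minDoc, minDoc + numDocsToUse));
                minDoc += numDocsToUse;
                docsAllocatedInCurrentSlice += numDocsToUse;
                if (docsAllocatedInCurrentSlice == maxDocsPerSlice) {
                    slices.add(currentSlice);
                    maxDocsPerSlice = normalMaxDocsPerSlice; // only the first slice gets the remainder
                    currentSlice = null;
                    docsAllocatedInCurrentSlice = 0;
                }
            }
        }
        if (currentSlice != null) {
            slices.add(currentSlice);
        }
        return slices;
    }

    public static void main(String[] args) {
        // Prints three slices: [seg0: 0..4) (4 docs), then [seg0: 4..6) plus
        // [seg1: 0..1) (3 docs), then [seg1: 1..4) (3 docs).
        System.out.println(docSlices(new int[] { 6, 4 }, 3));
    }
}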
diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java index 467ca03ea4b21..540cee388efc9 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneSourceOperator.java @@ -7,39 +7,43 @@ package org.elasticsearch.compute.lucene; -import org.apache.lucene.index.IndexReader; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorable; -import org.apache.lucene.search.Weight; +import org.apache.lucene.search.ScoreMode; import org.elasticsearch.compute.data.DocVector; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.search.internal.SearchContext; import java.io.IOException; import java.io.UncheckedIOException; -import java.util.Arrays; import java.util.List; import java.util.function.Function; -import java.util.stream.Collectors; /** * Source operator that incrementally runs Lucene searches */ public class LuceneSourceOperator extends LuceneOperator { - private int numCollectedDocs = 0; + private int currentPagePos = 0; + private int remainingDocs; - private final int maxCollectedDocs; + private IntVector.Builder docsBuilder; + private final LeafCollector leafCollector; + private final int minPageSize; - private IntVector.Builder currentDocsBuilder; + public static class Factory implements LuceneOperator.Factory { + private final DataPartitioning dataPartitioning; + private final int taskConcurrency; + private final int maxPageSize; + private final int limit; + private final LuceneSliceQueue sliceQueue; - public static class LuceneSourceOperatorFactory extends LuceneOperatorFactory { - - public LuceneSourceOperatorFactory( + public Factory( List<SearchContext> searchContexts, Function<SearchContext, Query> queryFunction, DataPartitioning dataPartitioning, @@ -47,14 +51,30 @@ public LuceneSourceOperatorFactory( int maxPageSize, int limit ) { - super(searchContexts, queryFunction, dataPartitioning, taskConcurrency, maxPageSize, limit); + this.maxPageSize = maxPageSize; + this.limit = limit; + this.dataPartitioning = dataPartitioning; + var weightFunction = weightFunction(queryFunction, ScoreMode.COMPLETE_NO_SCORES); + this.sliceQueue = LuceneSliceQueue.create(searchContexts, weightFunction, dataPartitioning, taskConcurrency); + this.taskConcurrency = Math.min(sliceQueue.totalSlices(), taskConcurrency); } @Override - LuceneOperator luceneOperatorForShard(int shardIndex) { - final SearchContext ctx = searchContexts.get(shardIndex); - final Query query = queryFunction.apply(ctx); - return new LuceneSourceOperator(ctx.getSearchExecutionContext().getIndexReader(), shardIndex, query, maxPageSize, limit); + public SourceOperator get(DriverContext driverContext) { + return new LuceneSourceOperator(maxPageSize, sliceQueue, limit); + } + + @Override + public int taskConcurrency() { + return taskConcurrency;
+ } + + public int maxPageSize() { + return maxPageSize; + } + + public int limit() { + return limit; } @Override @@ -69,112 +89,75 @@ public String describe() { } } - public LuceneSourceOperator(IndexReader reader, int shardId, Query query, int maxPageSize, int limit) { - super(reader, shardId, query, maxPageSize); - this.currentDocsBuilder = IntVector.newVectorBuilder(maxPageSize); - this.maxCollectedDocs = limit; - } + public LuceneSourceOperator(int maxPageSize, LuceneSliceQueue sliceQueue, int limit) { + super(maxPageSize, sliceQueue); + this.minPageSize = Math.max(1, maxPageSize / 2); + this.remainingDocs = limit; + this.docsBuilder = IntVector.newVectorBuilder(Math.min(limit, maxPageSize)); + this.leafCollector = new LeafCollector() { + @Override + public void setScorer(Scorable scorer) { - LuceneSourceOperator(Weight weight, int shardId, List leaves, int maxPageSize, int maxCollectedDocs) { - super(weight, shardId, leaves, maxPageSize); - this.currentDocsBuilder = IntVector.newVectorBuilder(maxPageSize); - this.maxCollectedDocs = maxCollectedDocs; - } - - @Override - LuceneOperator docSliceLuceneOperator(List slice) { - return new LuceneSourceOperator(weight, shardId, slice, maxPageSize, maxCollectedDocs); - } + } - @Override - LuceneOperator segmentSliceLuceneOperator(IndexSearcher.LeafSlice leafSlice) { - return new LuceneSourceOperator( - weight, - shardId, - Arrays.asList(leafSlice.leaves).stream().map(PartialLeafReaderContext::new).collect(Collectors.toList()), - maxPageSize, - maxCollectedDocs - ); + @Override + public void collect(int doc) { + if (remainingDocs > 0) { + --remainingDocs; + docsBuilder.appendInt(doc); + currentPagePos++; + } + } + }; } @Override - protected boolean doneCollecting() { - return currentLeaf >= leaves.size() || numCollectedDocs >= maxCollectedDocs; + public boolean isFinished() { + return doneCollecting; } @Override - public boolean isFinished() { - return doneCollecting(); + public void finish() { + doneCollecting = true; } @Override public Page getOutput() { if (isFinished()) { + assert currentPagePos == 0 : currentPagePos; return null; } - - // initialize weight if not done yet - initializeWeightIfNecessary(); - - // if there are documents matching, initialize currentLeafReaderContext, currentScorer, and currentScorerPos when we switch - // to a new leaf reader, otherwise return - if (maybeReturnEarlyOrInitializeScorer()) { - return null; - } - - Page page = null; - try { - currentScorerPos = currentScorer.score(new LeafCollector() { - @Override - public void setScorer(Scorable scorer) { - // ignore - } - - @Override - public void collect(int doc) { - if (numCollectedDocs < maxCollectedDocs) { - currentDocsBuilder.appendInt(doc); - numCollectedDocs++; - currentPagePos++; - } - } - }, - currentLeafReaderContext.leafReaderContext.reader().getLiveDocs(), - currentScorerPos, + final LuceneScorer scorer = getCurrentOrLoadNextScorer(); + if (scorer == null) { + return null; + } + scorer.scoreNextRange( + leafCollector, + scorer.leafReaderContext().reader().getLiveDocs(), // Note: if (maxPageSize - currentPagePos) is a small "remaining" interval, this could lead to slow collection with a // highly selective filter. Having a large "enough" difference between max- and minPageSize (and thus currentPagePos) // alleviates this issue. 
- Math.min(currentLeafReaderContext.maxDoc, currentScorerPos + maxPageSize - currentPagePos) + maxPageSize - currentPagePos ); - - if (currentPagePos >= minPageSize - || currentScorerPos >= currentLeafReaderContext.maxDoc - || numCollectedDocs >= maxCollectedDocs) { + Page page = null; + if (currentPagePos >= minPageSize || remainingDocs <= 0 || scorer.isDone()) { + pagesEmitted++; page = new Page( currentPagePos, new DocVector( - IntBlock.newConstantBlockWith(shardId, currentPagePos).asVector(), - IntBlock.newConstantBlockWith(currentLeafReaderContext.leafReaderContext.ord, currentPagePos).asVector(), - currentDocsBuilder.build(), + IntBlock.newConstantBlockWith(scorer.shardIndex(), currentPagePos).asVector(), + IntBlock.newConstantBlockWith(scorer.leafReaderContext().ord, currentPagePos).asVector(), + docsBuilder.build(), true ).asBlock() ); - currentDocsBuilder = IntVector.newVectorBuilder(maxPageSize); + docsBuilder = IntVector.newVectorBuilder(Math.min(remainingDocs, maxPageSize)); currentPagePos = 0; } - - if (currentScorerPos >= currentLeafReaderContext.maxDoc) { - currentLeaf++; - currentLeafReaderContext = null; - currentScorer = null; - currentScorerPos = 0; - } + return page; } catch (IOException e) { throw new UncheckedIOException(e); } - - pagesEmitted++; - return page; } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java index c2725596adb92..48389d31e08be 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperator.java @@ -7,26 +7,21 @@ package org.elasticsearch.compute.lucene; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.ReaderUtil; import org.apache.lucene.search.CollectionTerminatedException; -import org.apache.lucene.search.CollectorManager; -import org.apache.lucene.search.ConstantScoreQuery; -import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.LeafCollector; import org.apache.lucene.search.Query; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Sort; import org.apache.lucene.search.TopFieldCollector; -import org.apache.lucene.search.TopFieldDocs; -import org.apache.lucene.search.Weight; import org.elasticsearch.common.Strings; import org.elasticsearch.compute.data.DocVector; import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.DriverContext; +import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.search.sort.SortAndFormats; import org.elasticsearch.search.sort.SortBuilder; @@ -42,22 +37,9 @@ /** * Source operator that builds Pages out of the output of a TopFieldCollector (aka TopN) */ -public class LuceneTopNSourceOperator extends LuceneOperator { - - private Thread currentThread; - - private final TopFieldCollector topFieldCollector;// this should only be created via the collector manager - - private LeafCollector currentLeafCollector; - - private final List leafReaderContexts; - - private final CollectorManager collectorManager;// one for each shard - - 
private LeafReaderContext previousLeafReaderContext; - +public final class LuceneTopNSourceOperator extends LuceneOperator { /** - * Collected docs. {@code null} until we're {@link #doneCollecting}. + * Collected docs. {@code null} until we're {@link #emit(boolean)}. */ private ScoreDoc[] scoreDocs; /** @@ -65,43 +47,25 @@ public class LuceneTopNSourceOperator extends LuceneOperator { */ private int offset = 0; - public LuceneTopNSourceOperator(IndexReader reader, int shardId, Sort sort, Query query, int maxPageSize, int limit) { - super(reader, shardId, query, maxPageSize); - this.leafReaderContexts = reader.leaves(); - this.collectorManager = TopFieldCollector.createSharedManager(sort, limit, null, 0); - try { - this.topFieldCollector = collectorManager.newCollector(); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - this.currentThread = Thread.currentThread(); - } + private PerShardCollector perShardCollector; + private final List> sorts; + private final int limit; - private LuceneTopNSourceOperator( - Weight weight, - int shardId, - List leaves, - List leafReaderContexts, - CollectorManager collectorManager, - Thread currentThread, - int maxPageSize - ) { - super(weight, shardId, leaves, maxPageSize); - this.leafReaderContexts = leafReaderContexts; - this.collectorManager = collectorManager; - try { - this.topFieldCollector = collectorManager.newCollector(); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - this.currentThread = currentThread; + public LuceneTopNSourceOperator(int maxPageSize, List> sorts, int limit, LuceneSliceQueue sliceQueue) { + super(maxPageSize, sliceQueue); + this.sorts = sorts; + this.limit = limit; } - public static class LuceneTopNSourceOperatorFactory extends LuceneOperatorFactory { - + public static final class Factory implements LuceneOperator.Factory { + private final int taskConcurrency; + private final int maxPageSize; private final List> sorts; + private final int limit; + private final DataPartitioning dataPartitioning; + private final LuceneSliceQueue sliceQueue; - public LuceneTopNSourceOperatorFactory( + public Factory( List searchContexts, Function queryFunction, DataPartitioning dataPartitioning, @@ -110,37 +74,36 @@ public LuceneTopNSourceOperatorFactory( int limit, List> sorts ) { - super(searchContexts, queryFunction, dataPartitioning, taskConcurrency, maxPageSize, limit); - assert sorts != null; + this.maxPageSize = maxPageSize; this.sorts = sorts; + this.limit = limit; + this.dataPartitioning = dataPartitioning; + var weightFunction = weightFunction(queryFunction, ScoreMode.TOP_DOCS); + this.sliceQueue = LuceneSliceQueue.create(searchContexts, weightFunction, dataPartitioning, taskConcurrency); + this.taskConcurrency = Math.min(sliceQueue.totalSlices(), taskConcurrency); } @Override - LuceneOperator luceneOperatorForShard(int shardIndex) { - final SearchContext ctx = searchContexts.get(shardIndex); - final Query query = queryFunction.apply(ctx); - Sort sort = null; - try { - Optional optionalSort = SortBuilder.buildSort(sorts, ctx.getSearchExecutionContext()); - if (optionalSort.isPresent()) { - sort = optionalSort.get().sort; - } - } catch (IOException e) { - throw new UncheckedIOException(e); - } - return new LuceneTopNSourceOperator( - ctx.getSearchExecutionContext().getIndexReader(), - shardIndex, - sort, - query, - maxPageSize, - limit - ); + public SourceOperator get(DriverContext driverContext) { + return new LuceneTopNSourceOperator(maxPageSize, sorts, limit, sliceQueue); + } + + @Override + 
public int taskConcurrency() { + return taskConcurrency; + } + + public int maxPageSize() { + return maxPageSize; + } + + public int limit() { + return limit; } @Override public String describe() { - String notPrettySorts = sorts.stream().map(s -> Strings.toString(s)).collect(Collectors.joining(",")); + String notPrettySorts = sorts.stream().map(Strings::toString).collect(Collectors.joining(",")); return "LuceneTopNSourceOperator[dataPartitioning = " + dataPartitioning + ", maxPageSize = " @@ -151,118 +114,76 @@ public String describe() { + notPrettySorts + "]]"; } - - } - - @Override - LuceneOperator docSliceLuceneOperator(List slice) { - return new LuceneTopNSourceOperator(weight, shardId, slice, leafReaderContexts, collectorManager, currentThread, maxPageSize); } @Override - LuceneOperator segmentSliceLuceneOperator(IndexSearcher.LeafSlice leafSlice) { - return new LuceneTopNSourceOperator( - weight, - shardId, - Arrays.asList(leafSlice.leaves).stream().map(PartialLeafReaderContext::new).collect(Collectors.toList()), - leafReaderContexts, - collectorManager, - currentThread, - maxPageSize - ); - } - - @Override - void initializeWeightIfNecessary() { - if (weight == null) { - try { - IndexSearcher indexSearcher = new IndexSearcher(indexReader); - weight = indexSearcher.createWeight(indexSearcher.rewrite(new ConstantScoreQuery(query)), ScoreMode.TOP_DOCS, 1); - } catch (IOException e) { - throw new UncheckedIOException(e); - } - } - } - - @Override - protected boolean doneCollecting() { - return currentLeaf >= leaves.size(); - } - - private boolean doneEmitting() { - /* - * If there aren't any leaves then we never initialize scoreDocs. - */ - return leaves.isEmpty() || offset >= scoreDocs.length; + public boolean isFinished() { + return doneCollecting && isEmitting() == false; } @Override - public boolean isFinished() { - return doneCollecting() && doneEmitting(); + public void finish() { + doneCollecting = true; + scoreDocs = null; + assert isFinished(); } @Override public Page getOutput() { - if (doneCollecting()) { - return emit(); + if (isFinished()) { + return null; + } + if (isEmitting()) { + return emit(false); + } else { + return collect(); } - return collect(); } private Page collect() { - assert false == doneCollecting(); - // initialize weight if not done yet - initializeWeightIfNecessary(); - - // if there are documents matching, initialize currentLeafReaderContext and currentScorer when we switch to a new group in the slice - if (maybeReturnEarlyOrInitializeScorer()) { - // if there are no more documents matching and we reached the final slice, build the Page - scoreDocs = topFieldCollector.topDocs().scoreDocs; - return emit(); + assert doneCollecting == false; + var scorer = getCurrentOrLoadNextScorer(); + if (scorer == null) { + doneCollecting = true; + return emit(true); } - try { - // one leaf collector per thread and per segment/leaf - if (currentLeafCollector == null - || currentThread.equals(Thread.currentThread()) == false - || previousLeafReaderContext != currentLeafReaderContext.leafReaderContext) { - currentLeafCollector = topFieldCollector.getLeafCollector(currentLeafReaderContext.leafReaderContext); - currentThread = Thread.currentThread(); - previousLeafReaderContext = currentLeafReaderContext.leafReaderContext; - } - - try { - currentScorerPos = currentScorer.score( - currentLeafCollector, - currentLeafReaderContext.leafReaderContext.reader().getLiveDocs(), - currentScorerPos, - Math.min(currentLeafReaderContext.maxDoc, currentScorerPos + maxPageSize) - ); - } 
catch (CollectionTerminatedException cte) { - // Lucene terminated early the collection (doing topN for an index that's sorted and the topN uses the same sorting) - currentScorerPos = currentLeafReaderContext.maxDoc; + if (perShardCollector == null || perShardCollector.shardIndex != scorer.shardIndex()) { + // TODO: share the bottom between shardCollectors + perShardCollector = new PerShardCollector(scorer.shardIndex(), scorer.searchContext(), sorts, limit); } + var leafCollector = perShardCollector.getLeafCollector(scorer.leafReaderContext()); + scorer.scoreNextRange(leafCollector, scorer.leafReaderContext().reader().getLiveDocs(), maxPageSize); + } catch (CollectionTerminatedException cte) { + // Lucene terminated early the collection (doing topN for an index that's sorted and the topN uses the same sorting) + scorer.markAsDone(); } catch (IOException e) { throw new UncheckedIOException(e); } - if (currentScorerPos >= currentLeafReaderContext.maxDoc) { - // move to the next leaf if we are done reading from the current leaf (current scorer position reached the final doc) - currentLeaf++; - currentLeafReaderContext = null; - currentScorer = null; - currentScorerPos = 0; - } - if (doneCollecting()) { - // we reached the final leaf in this slice/operator, build the single Page this operator should create - scoreDocs = topFieldCollector.topDocs().scoreDocs; - return emit(); + if (scorer.isDone()) { + var nextScorer = getCurrentOrLoadNextScorer(); + if (nextScorer == null || nextScorer.shardIndex() != scorer.shardIndex()) { + return emit(true); + } } return null; } - private Page emit() { - assert doneCollecting(); - if (doneEmitting()) { + private boolean isEmitting() { + return scoreDocs != null && offset < scoreDocs.length; + } + + private Page emit(boolean startEmitting) { + if (startEmitting) { + assert isEmitting() == false : "offset=" + offset + " score_docs=" + Arrays.toString(scoreDocs); + offset = 0; + if (perShardCollector != null) { + scoreDocs = perShardCollector.topFieldCollector.topDocs().scoreDocs; + } else { + scoreDocs = new ScoreDoc[0]; + } + } + if (offset >= scoreDocs.length) { return null; } int size = Math.min(maxPageSize, scoreDocs.length - offset); @@ -271,22 +192,52 @@ private Page emit() { int start = offset; offset += size; + List leafContexts = perShardCollector.searchContext.searcher().getLeafContexts(); for (int i = start; i < offset; i++) { int doc = scoreDocs[i].doc; - int segment = ReaderUtil.subIndex(doc, leafReaderContexts); + int segment = ReaderUtil.subIndex(doc, leafContexts); currentSegmentBuilder.appendInt(segment); - currentDocsBuilder.appendInt(doc - leafReaderContexts.get(segment).docBase); // the offset inside the segment + currentDocsBuilder.appendInt(doc - leafContexts.get(segment).docBase); // the offset inside the segment } pagesEmitted++; return new Page( size, new DocVector( - IntBlock.newConstantBlockWith(shardId, size).asVector(), + IntBlock.newConstantBlockWith(perShardCollector.shardIndex, size).asVector(), currentSegmentBuilder.build(), currentDocsBuilder.build(), null ).asBlock() ); } + + static final class PerShardCollector { + private final int shardIndex; + private final SearchContext searchContext; + private final TopFieldCollector topFieldCollector; + private int leafIndex; + private LeafCollector leafCollector; + private Thread currentThread; + + PerShardCollector(int shardIndex, SearchContext searchContext, List> sorts, int limit) throws IOException { + this.shardIndex = shardIndex; + this.searchContext = searchContext; + Optional 
sortAndFormats = SortBuilder.buildSort(sorts, searchContext.getSearchExecutionContext()); + if (sortAndFormats.isEmpty()) { + throw new IllegalStateException("sorts must not be disabled in TopN"); + } + // We don't use CollectorManager here as we don't retrieve the total hits and sort by score. + this.topFieldCollector = TopFieldCollector.create(sortAndFormats.get().sort, limit, 0); + } + + LeafCollector getLeafCollector(LeafReaderContext leafReaderContext) throws IOException { + if (currentThread != Thread.currentThread() || leafIndex != leafReaderContext.ord) { + leafCollector = topFieldCollector.getLeafCollector(leafReaderContext); + leafIndex = leafReaderContext.ord; + currentThread = Thread.currentThread(); + } + return leafCollector; + } + } } diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/PartialLeafReaderContext.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/PartialLeafReaderContext.java new file mode 100644 index 0000000000000..964827a41516e --- /dev/null +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/PartialLeafReaderContext.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.compute.lucene; + +import org.apache.lucene.index.LeafReaderContext; + +public final class PartialLeafReaderContext { + + final LeafReaderContext leafReaderContext; + final int minDoc; // incl + final int maxDoc; // excl + + public PartialLeafReaderContext(LeafReaderContext leafReaderContext, int minDoc, int maxDoc) { + this.leafReaderContext = leafReaderContext; + this.minDoc = minDoc; + this.maxDoc = maxDoc; + } + + public PartialLeafReaderContext(LeafReaderContext leafReaderContext) { + this(leafReaderContext, 0, leafReaderContext.reader().maxDoc()); + } +} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java index 3d20cd069e164..b3ac80ee099b7 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperator.java @@ -160,7 +160,7 @@ public static class Status extends AbstractPageMappingOperator.Status { @Override public void writeTo(StreamOutput out) throws IOException { super.writeTo(out); - out.writeMap(readersBuilt, StreamOutput::writeString, StreamOutput::writeVInt); + out.writeMap(readersBuilt, StreamOutput::writeVInt); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java index 36bd6fc8cc53f..1a33bbbb9ff3a 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/DriverStatus.java @@ -59,7 +59,7 @@ public class DriverStatus implements Task.Status { } DriverStatus(StreamInput in) throws IOException { - this(in.readString(), in.readLong(), Status.valueOf(in.readString()), in.readImmutableList(OperatorStatus::new)); + 
this(in.readString(), in.readLong(), Status.valueOf(in.readString()), in.readCollectionAsImmutableList(OperatorStatus::new)); } @Override @@ -67,7 +67,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(sessionId); out.writeLong(lastUpdated); out.writeString(status.toString()); - out.writeList(activeOperators); + out.writeCollection(activeOperators); } @Override diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/EmptySourceOperator.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/EmptySourceOperator.java deleted file mode 100644 index 58496bc16a53e..0000000000000 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/EmptySourceOperator.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.compute.operator; - -import org.elasticsearch.compute.data.Page; - -/** - * An empty source operator, which is already finished and never emits any output. - */ -public final class EmptySourceOperator extends SourceOperator { - - public static class Factory implements SourceOperatorFactory { - @Override - public String describe() { - return "EmptySourceOperator[]"; - } - - @Override - public SourceOperator get(DriverContext driverContext) { - return new EmptySourceOperator(); - } - } - - @Override - public void finish() { - - } - - @Override - public boolean isFinished() { - return true; - } - - @Override - public Page getOutput() { - return null; - } - - @Override - public void close() { - - } -} diff --git a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java index 1054e240eed6b..c3efd67579d66 100644 --- a/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java +++ b/x-pack/plugin/esql/compute/src/main/java/org/elasticsearch/compute/operator/exchange/ExchangeService.java @@ -22,6 +22,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.AbstractAsyncTask; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; @@ -211,7 +212,7 @@ public void messageReceived(ExchangeRequest request, TransportChannel channel, T private final class InactiveSinksReaper extends AbstractAsyncTask { InactiveSinksReaper(Logger logger, ThreadPool threadPool, TimeValue interval) { - super(logger, threadPool, interval, true); + super(logger, threadPool, EsExecutors.DIRECT_EXECUTOR_SERVICE, interval, true); rescheduleIfNecessary(); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java index 114576b7bed7e..639e8e401a726 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/OperatorTests.java @@ -10,7 +10,6 @@ import 
org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.LongPoint; -import org.apache.lucene.document.NumericDocValuesField; import org.apache.lucene.document.SortedSetDocValuesField; import org.apache.lucene.index.DirectoryReader; import org.apache.lucene.index.IndexReader; @@ -24,8 +23,6 @@ import org.apache.lucene.search.Query; import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortField; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; import org.apache.lucene.tests.store.BaseDirectoryWrapper; @@ -47,11 +44,10 @@ import org.elasticsearch.compute.data.IntVector; import org.elasticsearch.compute.data.LongBlock; import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.lucene.DataPartitioning; import org.elasticsearch.compute.lucene.LuceneOperator; import org.elasticsearch.compute.lucene.LuceneSourceOperator; -import org.elasticsearch.compute.lucene.LuceneTopNSourceOperator; import org.elasticsearch.compute.lucene.ValueSourceInfo; -import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator; import org.elasticsearch.compute.operator.AbstractPageMappingOperator; import org.elasticsearch.compute.operator.Driver; import org.elasticsearch.compute.operator.DriverContext; @@ -62,19 +58,17 @@ import org.elasticsearch.compute.operator.OrdinalsGroupingOperator; import org.elasticsearch.compute.operator.PageConsumerOperator; import org.elasticsearch.compute.operator.SequenceLongBlockSourceOperator; -import org.elasticsearch.compute.operator.TopNOperator; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Releasables; -import org.elasticsearch.index.fielddata.IndexNumericFieldData; import org.elasticsearch.index.fielddata.SortedBinaryDocValues; -import org.elasticsearch.index.fielddata.plain.SortedNumericIndexFieldData; import org.elasticsearch.index.mapper.KeywordFieldMapper; +import org.elasticsearch.index.mapper.MapperServiceTestCase; import org.elasticsearch.index.mapper.Uid; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; import org.elasticsearch.search.aggregations.support.ValuesSource; +import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.xpack.ql.util.Holder; import java.io.IOException; import java.util.ArrayList; @@ -84,164 +78,18 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.atomic.AtomicInteger; import java.util.function.LongUnaryOperator; import static org.elasticsearch.compute.aggregation.AggregatorMode.FINAL; import static org.elasticsearch.compute.aggregation.AggregatorMode.INITIAL; +import static org.elasticsearch.compute.lucene.LuceneSourceOperatorTests.mockSearchContext; import static org.elasticsearch.compute.operator.OperatorTestCase.randomPageSize; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; -public class OperatorTests extends ESTestCase { - - public void testLuceneOperatorsLimit() throws IOException { - final int numDocs = randomIntBetween(10_000, 100_000); - try (Directory dir = newDirectory(); RandomIndexWriter w = writeTestDocs(dir, numDocs, "value", null)) { - try (IndexReader reader = w.getReader()) { - 
AtomicInteger rowCount = new AtomicInteger(); - final int limit = randomIntBetween(1, numDocs * 2); - DriverContext driverContext = new DriverContext(); - try ( - Driver driver = new Driver( - driverContext, - new LuceneSourceOperator(reader, 0, new MatchAllDocsQuery(), randomPageSize(), limit), - Collections.emptyList(), - new PageConsumerOperator(page -> rowCount.addAndGet(page.getPositionCount())), - () -> {} - ) - ) { - OperatorTestCase.runDriver(driver); - } - assertEquals(Math.min(limit, numDocs), rowCount.get()); - assertDriverContext(driverContext); - } - } - } - - public void testLuceneTopNSourceOperator() throws IOException { - final int numDocs = randomIntBetween(10_000, 100_000); - final int pageSize = randomIntBetween(1_000, 100_000); - final int limit = randomIntBetween(1, pageSize); - String fieldName = "value"; - - try (Directory dir = newDirectory(); RandomIndexWriter w = writeTestDocs(dir, numDocs, fieldName, null)) { - ValuesSource vs = new ValuesSource.Numeric.FieldData( - new SortedNumericIndexFieldData( - fieldName, - IndexNumericFieldData.NumericType.LONG, - IndexNumericFieldData.NumericType.LONG.getValuesSourceType(), - null - ) - ); - try (IndexReader reader = w.getReader()) { - AtomicInteger rowCount = new AtomicInteger(); - Sort sort = new Sort(new SortField(fieldName, SortField.Type.LONG)); - Holder expectedValue = new Holder<>(0L); - DriverContext driverContext = new DriverContext(); - try ( - Driver driver = new Driver( - driverContext, - new LuceneTopNSourceOperator(reader, 0, sort, new MatchAllDocsQuery(), pageSize, limit), - List.of( - new ValuesSourceReaderOperator( - List.of(new ValueSourceInfo(CoreValuesSourceType.NUMERIC, vs, ElementType.LONG, reader)), - 0, - fieldName - ), - new TopNOperator(limit, List.of(new TopNOperator.SortOrder(1, true, true)), randomPageSize()) - ), - new PageConsumerOperator(page -> { - rowCount.addAndGet(page.getPositionCount()); - for (int i = 0; i < page.getPositionCount(); i++) { - LongBlock longValuesBlock = page.getBlock(1); - long expected = expectedValue.get(); - assertEquals(expected, longValuesBlock.getLong(i)); - expectedValue.set(expected + 1); - } - }), - () -> {} - ) - ) { - OperatorTestCase.runDriver(driver); - } - assertEquals(Math.min(limit, numDocs), rowCount.get()); - assertDriverContext(driverContext); - } - } - } - - public void testOperatorsWithLuceneSlicing() throws IOException { - final String fieldName = "value"; - final int numDocs = 100000; - try (Directory dir = newDirectory(); RandomIndexWriter w = writeTestDocs(dir, numDocs, fieldName, randomIntBetween(1, 10))) { - ValuesSource vs = new ValuesSource.Numeric.FieldData( - new SortedNumericIndexFieldData( - fieldName, - IndexNumericFieldData.NumericType.LONG, - IndexNumericFieldData.NumericType.LONG.getValuesSourceType(), - null - ) - ); - - try (IndexReader reader = w.getReader()) { - AtomicInteger rowCount = new AtomicInteger(); - - List drivers = new ArrayList<>(); - LuceneSourceOperator luceneOperator = new LuceneSourceOperator( - reader, - 0, - new MatchAllDocsQuery(), - randomPageSize(), - LuceneOperator.NO_LIMIT - ); - try { - for (LuceneOperator luceneSourceOperator : luceneOperator.docSlice(randomIntBetween(1, 10))) { - drivers.add( - new Driver( - new DriverContext(), - luceneSourceOperator, - List.of( - new ValuesSourceReaderOperator( - List.of(new ValueSourceInfo(CoreValuesSourceType.NUMERIC, vs, ElementType.LONG, reader)), - 0, - fieldName - ) - ), - new PageConsumerOperator(page -> rowCount.addAndGet(page.getPositionCount())), - () -> {} 
- ) - ); - } - OperatorTestCase.runDriver(drivers); - } finally { - Releasables.close(drivers); - } - assertEquals(numDocs, rowCount.get()); - drivers.stream().map(Driver::driverContext).forEach(OperatorTests::assertDriverContext); - } - } - } - - private static RandomIndexWriter writeTestDocs(Directory dir, int numDocs, String fieldName, Integer maxSegmentCount) - throws IOException { - RandomIndexWriter w = new RandomIndexWriter(random(), dir); - Document doc = new Document(); - NumericDocValuesField docValuesField = new NumericDocValuesField(fieldName, 0); - for (int i = 0; i < numDocs; i++) { - doc.clear(); - docValuesField.setLongValue(i); - doc.add(docValuesField); - w.addDocument(doc); - } - if (maxSegmentCount != null && randomBoolean()) { - w.forceMerge(randomIntBetween(1, 10)); - } - w.commit(); - - return w; - } +// TODO: Move these tests to the right test classes. +public class OperatorTests extends MapperServiceTestCase { public void testQueryOperator() throws IOException { Map docs = new HashMap<>(); @@ -249,24 +97,11 @@ public void testQueryOperator() throws IOException { final long from = randomBoolean() ? Long.MIN_VALUE : randomLongBetween(0, 10000); final long to = randomBoolean() ? Long.MAX_VALUE : randomLongBetween(from, from + 10000); final Query query = LongPoint.newRangeQuery("pt", from, to); - final String partition = randomFrom("shard", "segment", "doc"); - final LuceneSourceOperator luceneOperator = new LuceneSourceOperator( - reader, - 0, - query, - randomPageSize(), - LuceneOperator.NO_LIMIT - ); - final List<LuceneOperator> queryOperators = switch (partition) { - case "shard" -> List.of(luceneOperator); - case "segment" -> luceneOperator.segmentSlice(); - case "doc" -> luceneOperator.docSlice(randomIntBetween(1, 10)); - default -> throw new AssertionError("unknown partition [" + partition + "]"); - }; + LuceneOperator.Factory factory = luceneOperatorFactory(reader, query, LuceneOperator.NO_LIMIT); List<Driver> drivers = new ArrayList<>(); try { Set<Integer> actualDocIds = Collections.newSetFromMap(ConcurrentCollections.newConcurrentMap()); - for (LuceneOperator queryOperator : queryOperators) { + for (int t = 0; t < factory.taskConcurrency(); t++) { PageConsumerOperator docCollector = new PageConsumerOperator(page -> { DocVector docVector = page.getBlock(0).asVector(); IntVector doc = docVector.docs(); @@ -277,11 +112,12 @@ public void testQueryOperator() throws IOException { assertTrue("duplicated docId=" + docId, actualDocIds.add(docId)); } }); - drivers.add(new Driver(new DriverContext(), queryOperator, List.of(), docCollector, () -> {})); + DriverContext driverContext = new DriverContext(); + drivers.add(new Driver(driverContext, factory.get(driverContext), List.of(), docCollector, () -> {})); } OperatorTestCase.runDriver(drivers); Set<Integer> expectedDocIds = searchForDocIds(reader, query); - assertThat("query=" + query + ", partition=" + partition, actualDocIds, equalTo(expectedDocIds)); + assertThat("query=" + query, actualDocIds, equalTo(expectedDocIds)); drivers.stream().map(Driver::driverContext).forEach(OperatorTests::assertDriverContext); } finally { Releasables.close(drivers); @@ -375,9 +211,10 @@ public String toString() { try (DirectoryReader reader = writer.getReader()) { DriverContext driverContext = new DriverContext(); + Driver driver = new Driver( driverContext, - new LuceneSourceOperator(reader, 0, new MatchAllDocsQuery(), randomPageSize(), LuceneOperator.NO_LIMIT), + luceneOperatorFactory(reader, new MatchAllDocsQuery(), LuceneOperator.NO_LIMIT).get(driverContext),
List.of(shuffleDocsOperator, new AbstractPageMappingOperator() { @Override protected Page process(Page page) { @@ -554,4 +391,16 @@ public static void assertDriverContext(DriverContext driverContext) { assertTrue(driverContext.isFinished()); assertThat(driverContext.getSnapshot().releasables(), empty()); } + + static LuceneOperator.Factory luceneOperatorFactory(IndexReader reader, Query query, int limit) { + final SearchContext searchContext = mockSearchContext(reader); + return new LuceneSourceOperator.Factory( + List.of(searchContext), + ctx -> query, + randomFrom(DataPartitioning.values()), + randomIntBetween(1, 10), + randomPageSize(), + limit + ); + } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java index d79e99cb28225..ca7ef54f7f321 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BasicPageTests.java @@ -182,10 +182,10 @@ public void testSerializationListPages() throws IOException { new Page(BytesRefBlock.newConstantBlockWith(new BytesRef("Hello World"), positions)) ); final BytesStreamOutput out = new BytesStreamOutput(); - out.writeList(origPages); + out.writeCollection(origPages); StreamInput in = new NamedWriteableAwareStreamInput(ByteBufferStreamInput.wrap(BytesReference.toBytes(out.bytes())), registry); - List<Page> deserPages = in.readList(new Page.PageReader()); + List<Page> deserPages = in.readCollectionAsList(new Page.PageReader()); EqualsHashCodeTestUtils.checkEqualsAndHashCode(origPages, unused -> deserPages); } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java new file mode 100644 index 0000000000000..5503c02be9794 --- /dev/null +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockAccountingTests.java @@ -0,0 +1,230 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0.
+ */ + +package org.elasticsearch.compute.data; + +import org.apache.lucene.tests.util.RamUsageTester; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.util.BigArray; +import org.elasticsearch.common.util.BigArrays; +import org.elasticsearch.common.util.BytesRefArray; +import org.elasticsearch.test.ESTestCase; +import org.hamcrest.Matcher; + +import java.lang.reflect.Field; +import java.util.BitSet; +import java.util.Collection; +import java.util.Map; + +import static org.apache.lucene.util.RamUsageEstimator.alignObjectSize; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class BlockAccountingTests extends ESTestCase { + + // A large(ish) upper bound, simply so that greaterThan assertions are not effectively unbounded + static final long UPPER_BOUND = 10_000; + + // Array Vectors + public void testBooleanVector() { + Vector empty = new BooleanArrayVector(new boolean[] {}, 0); + long expectedEmptyUsed = RamUsageTester.ramUsed(empty); + assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); + + Vector emptyPlusOne = new BooleanArrayVector(new boolean[] { randomBoolean() }, 1); + assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + 1))); + + boolean[] randomData = new boolean[randomIntBetween(1, 1024)]; + Vector emptyPlusSome = new BooleanArrayVector(randomData, randomData.length); + assertThat(emptyPlusSome.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + randomData.length))); + + // a filter becomes responsible for its enclosing data, both in terms of accounting and releasability + Vector filterVector = emptyPlusSome.filter(1); + assertThat(filterVector.ramBytesUsed(), between(emptyPlusSome.ramBytesUsed(), UPPER_BOUND)); + } + + public void testIntVector() { + Vector empty = new IntArrayVector(new int[] {}, 0); + long expectedEmptyUsed = RamUsageTester.ramUsed(empty); + assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); + + Vector emptyPlusOne = new IntArrayVector(new int[] { randomInt() }, 1); + assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + Integer.BYTES))); + + int[] randomData = new int[randomIntBetween(1, 1024)]; + Vector emptyPlusSome = new IntArrayVector(randomData, randomData.length); + assertThat(emptyPlusSome.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + (long) Integer.BYTES * randomData.length))); + + // a filter becomes responsible for its enclosing data, both in terms of accounting and releasability + Vector filterVector = emptyPlusSome.filter(1); + assertThat(filterVector.ramBytesUsed(), between(emptyPlusSome.ramBytesUsed(), UPPER_BOUND)); + } + + public void testLongVector() { + Vector empty = new LongArrayVector(new long[] {}, 0); + long expectedEmptyUsed = RamUsageTester.ramUsed(empty); + assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); + + Vector emptyPlusOne = new LongArrayVector(new long[] { randomLong() }, 1); + assertThat(emptyPlusOne.ramBytesUsed(), is(empty.ramBytesUsed() + Long.BYTES)); + + long[] randomData = new long[randomIntBetween(1, 1024)]; + Vector emptyPlusSome = new LongArrayVector(randomData, randomData.length); + assertThat(emptyPlusSome.ramBytesUsed(), is(empty.ramBytesUsed() + (long) Long.BYTES * randomData.length)); + + // a filter becomes responsible for its enclosing data, both in terms of accounting and releasability + Vector filterVector = emptyPlusSome.filter(1);
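+ // the filtered view wraps the original vector rather than copying it, so it should report at least the wrapped vector's bytes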
+ assertThat(filterVector.ramBytesUsed(), between(emptyPlusSome.ramBytesUsed(), UPPER_BOUND)); + } + + public void testDoubleVector() { + Vector empty = new DoubleArrayVector(new double[] {}, 0); + long expectedEmptyUsed = RamUsageTester.ramUsed(empty); + assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); + + Vector emptyPlusOne = new DoubleArrayVector(new double[] { randomDouble() }, 1); + assertThat(emptyPlusOne.ramBytesUsed(), is(empty.ramBytesUsed() + Double.BYTES)); + + double[] randomData = new double[randomIntBetween(1, 1024)]; + Vector emptyPlusSome = new DoubleArrayVector(randomData, randomData.length); + assertThat(emptyPlusSome.ramBytesUsed(), is(empty.ramBytesUsed() + (long) Double.BYTES * randomData.length)); + + // a filter becomes responsible for its enclosing data, both in terms of accounting and releasability + Vector filterVector = emptyPlusSome.filter(1); + assertThat(filterVector.ramBytesUsed(), between(emptyPlusSome.ramBytesUsed(), UPPER_BOUND)); + } + + public void testBytesRefVector() { + try ( + var emptyArray = new BytesRefArray(0, BigArrays.NON_RECYCLING_INSTANCE); + var arrayWithOne = new BytesRefArray(0, BigArrays.NON_RECYCLING_INSTANCE) + ) { + var acc = new RamUsageTester.Accumulator() { + @Override + public long accumulateObject(Object o, long shallowSize, Map<Field, Object> fieldValues, Collection<Object> queue) { + for (var entry : fieldValues.entrySet()) { + if (entry.getKey().getType().equals(BigArrays.class)) { + // skip BigArrays, as it is (correctly) not part of the ramBytesUsed for BytesRefArray + } else if (o instanceof BigArray bigArray) { + return bigArray.ramBytesUsed(); + } else { + queue.add(entry.getValue()); + } + } + return shallowSize; + } + }; + Vector emptyVector = new BytesRefArrayVector(emptyArray, 0); + long expectedEmptyVectorUsed = RamUsageTester.ramUsed(emptyVector, acc); + assertThat(emptyVector.ramBytesUsed(), is(expectedEmptyVectorUsed)); + + var bytesRef = new BytesRef(randomAlphaOfLengthBetween(1, 16)); + arrayWithOne.append(bytesRef); + Vector emptyPlusOne = new BytesRefArrayVector(arrayWithOne, 1); + assertThat(emptyPlusOne.ramBytesUsed(), between(emptyVector.ramBytesUsed() + bytesRef.length, UPPER_BOUND)); + + // a filter becomes responsible for its enclosing data, both in terms of accounting and releasability + Vector filterVector = emptyPlusOne.filter(1); + assertThat(filterVector.ramBytesUsed(), between(emptyPlusOne.ramBytesUsed(), UPPER_BOUND)); + } + } + + // Array Blocks + public void testBooleanBlock() { + Block empty = new BooleanArrayBlock(new boolean[] {}, 0, new int[] {}, null, Block.MvOrdering.UNORDERED); + long expectedEmptyUsed = RamUsageTester.ramUsed(empty); + assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); + + Block emptyPlusOne = new BooleanArrayBlock(new boolean[] { randomBoolean() }, 1, new int[] {}, null, Block.MvOrdering.UNORDERED); + assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + 1))); + + boolean[] randomData = new boolean[randomIntBetween(1, 1024)]; + Block emptyPlusSome = new BooleanArrayBlock(randomData, randomData.length, new int[] {}, null, Block.MvOrdering.UNORDERED); + assertThat(emptyPlusSome.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + randomData.length))); + + Block filterBlock = emptyPlusSome.filter(1); + assertThat(filterBlock.ramBytesUsed(), between(emptyPlusSome.ramBytesUsed(), UPPER_BOUND)); + } + + public void testBooleanBlockWithNullFirstValues() { + Block empty = new BooleanArrayBlock(new boolean[] {}, 0, null, BitSet.valueOf(new byte[] { 1
}), Block.MvOrdering.UNORDERED); + long expectedEmptyUsed = RamUsageTester.ramUsed(empty); + assertThat(empty.ramBytesUsed(), lessThanOrEqualTo(expectedEmptyUsed)); + } + + public void testIntBlock() { + Block empty = new IntArrayBlock(new int[] {}, 0, new int[] {}, null, Block.MvOrdering.UNORDERED); + long expectedEmptyUsed = RamUsageTester.ramUsed(empty); + assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); + + Block emptyPlusOne = new IntArrayBlock(new int[] { randomInt() }, 1, new int[] {}, null, Block.MvOrdering.UNORDERED); + assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + Integer.BYTES))); + + int[] randomData = new int[randomIntBetween(1, 1024)]; + Block emptyPlusSome = new IntArrayBlock(randomData, randomData.length, new int[] {}, null, Block.MvOrdering.UNORDERED); + assertThat(emptyPlusSome.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + (long) Integer.BYTES * randomData.length))); + + Block filterBlock = emptyPlusSome.filter(1); + assertThat(filterBlock.ramBytesUsed(), between(emptyPlusSome.ramBytesUsed(), UPPER_BOUND)); + } + + public void testIntBlockWithNullFirstValues() { + Block empty = new IntArrayBlock(new int[] {}, 0, null, BitSet.valueOf(new byte[] { 1 }), Block.MvOrdering.UNORDERED); + long expectedEmptyUsed = RamUsageTester.ramUsed(empty); + assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); + } + + public void testLongBlock() { + Block empty = new LongArrayBlock(new long[] {}, 0, new int[] {}, null, Block.MvOrdering.UNORDERED); + long expectedEmptyUsed = RamUsageTester.ramUsed(empty); + assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); + + Block emptyPlusOne = new LongArrayBlock(new long[] { randomInt() }, 1, new int[] {}, null, Block.MvOrdering.UNORDERED); + assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + Long.BYTES))); + + long[] randomData = new long[randomIntBetween(1, 1024)]; + Block emptyPlusSome = new LongArrayBlock(randomData, randomData.length, new int[] {}, null, Block.MvOrdering.UNORDERED); + assertThat(emptyPlusSome.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + (long) Long.BYTES * randomData.length))); + + Block filterBlock = emptyPlusSome.filter(1); + assertThat(filterBlock.ramBytesUsed(), between(emptyPlusSome.ramBytesUsed(), UPPER_BOUND)); + } + + public void testLongBlockWithNullFirstValues() { + Block empty = new LongArrayBlock(new long[] {}, 0, null, BitSet.valueOf(new byte[] { 1 }), Block.MvOrdering.UNORDERED); + long expectedEmptyUsed = RamUsageTester.ramUsed(empty); + assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); + } + + public void testDoubleBlock() { + Block empty = new DoubleArrayBlock(new double[] {}, 0, new int[] {}, null, Block.MvOrdering.UNORDERED); + long expectedEmptyUsed = RamUsageTester.ramUsed(empty); + assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); + + Block emptyPlusOne = new DoubleArrayBlock(new double[] { randomInt() }, 1, new int[] {}, null, Block.MvOrdering.UNORDERED); + assertThat(emptyPlusOne.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + Double.BYTES))); + + double[] randomData = new double[randomIntBetween(1, 1024)]; + Block emptyPlusSome = new DoubleArrayBlock(randomData, randomData.length, new int[] {}, null, Block.MvOrdering.UNORDERED); + assertThat(emptyPlusSome.ramBytesUsed(), is(alignObjectSize(empty.ramBytesUsed() + (long) Double.BYTES * randomData.length))); + + Block filterBlock = emptyPlusSome.filter(1); + assertThat(filterBlock.ramBytesUsed(), 
between(emptyPlusSome.ramBytesUsed(), UPPER_BOUND)); + } + + public void testDoubleBlockWithNullFirstValues() { + Block empty = new DoubleArrayBlock(new double[] {}, 0, null, BitSet.valueOf(new byte[] { 1 }), Block.MvOrdering.UNORDERED); + long expectedEmptyUsed = RamUsageTester.ramUsed(empty); + assertThat(empty.ramBytesUsed(), is(expectedEmptyUsed)); + } + + static Matcher<Long> between(long minInclusive, long maxInclusive) { + return allOf(greaterThanOrEqualTo(minInclusive), lessThanOrEqualTo(maxInclusive)); + } +} diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java index 275479f4c714e..50a3abb1204ad 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockSerializationTests.java @@ -47,18 +47,30 @@ private void assertConstantBlockImpl(Block origBlock) throws IOException { public void testEmptyIntBlock() { assertEmptyBlock(IntBlock.newBlockBuilder(0).build()); + assertEmptyBlock(IntBlock.newBlockBuilder(0).appendNull().build().filter()); + assertEmptyBlock(IntVector.newVectorBuilder(0).build().asBlock()); + assertEmptyBlock(IntVector.newVectorBuilder(0).appendInt(randomInt()).build().filter().asBlock()); } public void testEmptyLongBlock() { assertEmptyBlock(LongBlock.newBlockBuilder(0).build()); + assertEmptyBlock(LongBlock.newBlockBuilder(0).appendNull().build().filter()); + assertEmptyBlock(LongVector.newVectorBuilder(0).build().asBlock()); + assertEmptyBlock(LongVector.newVectorBuilder(0).appendLong(randomLong()).build().filter().asBlock()); } public void testEmptyDoubleBlock() { assertEmptyBlock(DoubleBlock.newBlockBuilder(0).build()); + assertEmptyBlock(DoubleBlock.newBlockBuilder(0).appendNull().build().filter()); + assertEmptyBlock(DoubleVector.newVectorBuilder(0).build().asBlock()); + assertEmptyBlock(DoubleVector.newVectorBuilder(0).appendDouble(randomDouble()).build().filter().asBlock()); } public void testEmptyBytesRefBlock() { assertEmptyBlock(BytesRefBlock.newBlockBuilder(0).build()); + assertEmptyBlock(BytesRefBlock.newBlockBuilder(0).appendNull().build().filter()); + assertEmptyBlock(BytesRefVector.newVectorBuilder(0).build().asBlock()); + assertEmptyBlock(BytesRefVector.newVectorBuilder(0).appendBytesRef(randomBytesRef()).build().filter().asBlock()); } private void assertEmptyBlock(Block origBlock) { @@ -68,23 +80,47 @@ private void assertEmptyBlock(Block origBlock) { public void testFilterIntBlock() throws IOException { assertFilterBlock(IntBlock.newBlockBuilder(0).appendInt(1).appendInt(2).build().filter(1)); + assertFilterBlock(IntBlock.newBlockBuilder(1).appendInt(randomInt()).appendNull().build().filter(0)); + assertFilterBlock(IntVector.newVectorBuilder(1).appendInt(randomInt()).build().filter(0).asBlock()); + assertFilterBlock(IntVector.newVectorBuilder(1).appendInt(randomInt()).appendInt(randomInt()).build().filter(0).asBlock()); } public void testFilterLongBlock() throws IOException { assertFilterBlock(LongBlock.newBlockBuilder(0).appendLong(1).appendLong(2).build().filter(1)); + assertFilterBlock(LongBlock.newBlockBuilder(1).appendLong(randomLong()).appendNull().build().filter(0)); + assertFilterBlock(LongVector.newVectorBuilder(1).appendLong(randomLong()).build().filter(0).asBlock()); +
assertFilterBlock(LongVector.newVectorBuilder(1).appendLong(randomLong()).appendLong(randomLong()).build().filter(0).asBlock()); } public void testFilterDoubleBlock() throws IOException { assertFilterBlock(DoubleBlock.newBlockBuilder(0).appendDouble(1).appendDouble(2).build().filter(1)); + assertFilterBlock(DoubleBlock.newBlockBuilder(1).appendDouble(randomDouble()).appendNull().build().filter(0)); + assertFilterBlock(DoubleVector.newVectorBuilder(1).appendDouble(randomDouble()).build().filter(0).asBlock()); + assertFilterBlock( + DoubleVector.newVectorBuilder(1).appendDouble(randomDouble()).appendDouble(randomDouble()).build().filter(0).asBlock() + ); } public void testFilterBytesRefBlock() throws IOException { - BytesRefBlock block = BytesRefBlock.newBlockBuilder(0) - .appendBytesRef(new BytesRef("1")) - .appendBytesRef(new BytesRef("2")) - .build() - .filter(1); - assertFilterBlock(block); + assertFilterBlock( + BytesRefBlock.newBlockBuilder(0) + .appendBytesRef(randomBytesRef()) + .appendBytesRef(randomBytesRef()) + .build() + .filter(randomIntBetween(0, 1)) + ); + assertFilterBlock( + BytesRefBlock.newBlockBuilder(0).appendBytesRef(randomBytesRef()).appendNull().build().filter(randomIntBetween(0, 1)) + ); + assertFilterBlock(BytesRefVector.newVectorBuilder(0).appendBytesRef(randomBytesRef()).build().asBlock().filter(0)); + assertFilterBlock( + BytesRefVector.newVectorBuilder(0) + .appendBytesRef(randomBytesRef()) + .appendBytesRef(randomBytesRef()) + .build() + .asBlock() + .filter(randomIntBetween(0, 1)) + ); } private void assertFilterBlock(Block origBlock) throws IOException { @@ -121,4 +157,8 @@ public void testAggregatorStateBlock() throws IOException { var finalBlock = (LongBlock) finalBlocks[0]; assertThat(finalBlock.getLong(0), is(55L)); } + + static BytesRef randomBytesRef() { + return new BytesRef(randomAlphaOfLengthBetween(0, 10)); + } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockValueAsserter.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockValueAsserter.java index 6ad66c54b7568..e03de38d637db 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockValueAsserter.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/BlockValueAsserter.java @@ -77,7 +77,13 @@ private static void assertBytesRefRowValues(BytesRefBlock block, int firstValueI private static void assertBooleanRowValues(BooleanBlock block, int firstValueIndex, int valueCount, List<Object> expectedRowValues) { for (int valueIndex = 0; valueIndex < valueCount; valueIndex++) { - boolean expectedValue = (Boolean) expectedRowValues.get(valueIndex); + Object value = expectedRowValues.get(valueIndex); + boolean expectedValue; + if (value instanceof Number number) { + expectedValue = number.intValue() % 2 == 0; + } else { + expectedValue = (Boolean) expectedRowValues.get(valueIndex); + } assertThat(block.getBoolean(firstValueIndex + valueIndex), is(equalTo(expectedValue))); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MultiValueBlockTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MultiValueBlockTests.java index 9e809ac511a79..482a61a329a94 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MultiValueBlockTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/MultiValueBlockTests.java @@ -16,6 +16,7 @@ import static
org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; public class MultiValueBlockTests extends SerializationTestCase { @@ -233,4 +234,84 @@ public void testMultiValuesAndNulls() { BlockValueAsserter.assertBlockValues(bytesRefBlock, blockValues); EqualsHashCodeTestUtils.checkEqualsAndHashCode(intBlock, block -> serializeDeserializeBlock(block)); } + + // Tests that the use of Block builder beginPositionEntry (or not) with just a single value, + // and no nulls, builds a block backed by a vector. + public void testSingleNonNullValues() { + List<Object> blockValues = new ArrayList<>(); + int positions = randomInt(512); + for (int i = 0; i < positions; i++) { + blockValues.add(randomInt()); + } + + var blocks = List.of( + TestBlockBuilder.blockFromSingleValues(blockValues, ElementType.BOOLEAN), + TestBlockBuilder.blockFromValues(blockValues.stream().map(List::of).toList(), ElementType.BOOLEAN), + TestBlockBuilder.blockFromSingleValues(blockValues, ElementType.INT), + TestBlockBuilder.blockFromValues(blockValues.stream().map(List::of).toList(), ElementType.INT), + TestBlockBuilder.blockFromSingleValues(blockValues, ElementType.LONG), + TestBlockBuilder.blockFromValues(blockValues.stream().map(List::of).toList(), ElementType.LONG), + TestBlockBuilder.blockFromSingleValues(blockValues, ElementType.DOUBLE), + TestBlockBuilder.blockFromValues(blockValues.stream().map(List::of).toList(), ElementType.DOUBLE), + TestBlockBuilder.blockFromSingleValues(blockValues, ElementType.BYTES_REF), + TestBlockBuilder.blockFromValues(blockValues.stream().map(List::of).toList(), ElementType.BYTES_REF) + ); + for (Block block : blocks) { + assertThat(block.asVector(), is(notNullValue())); + BlockValueAsserter.assertBlockValues(block, blockValues.stream().map(List::of).toList()); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(block, unused -> serializeDeserializeBlock(block)); + } + } + + // A default maximum number of iterations, just to avoid an infinite loop. + static final int TIMES = 10_000; + + // Tests that the use of Block builder beginPositionEntry (or not) with just a single value, + // with nulls, builds a block not backed by a vector.
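+ // The loop below retries, up to TIMES iterations, until at least one null has been generated.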
+ public void testSingleWithNullValues() { + List<Object> blockValues = new ArrayList<>(); + boolean atLeastOneNull = false; + int positions = randomIntBetween(1, 512); // we must have at least one null entry + int times = 0; + while (atLeastOneNull == false && times < TIMES) { + times++; + for (int i = 0; i < positions; i++) { + boolean isNull = randomBoolean(); + if (isNull) { + atLeastOneNull = true; + blockValues.add(null); // empty / null + } else { + blockValues.add(randomInt()); + } + } + } + assert atLeastOneNull : "Failed to create a values block with at least one null after " + times + " attempts"; + + var blocks = List.of( + TestBlockBuilder.blockFromSingleValues(blockValues, ElementType.BOOLEAN), + TestBlockBuilder.blockFromValues(blockValues.stream().map(MultiValueBlockTests::mapToList).toList(), ElementType.BOOLEAN), + TestBlockBuilder.blockFromSingleValues(blockValues, ElementType.INT), + TestBlockBuilder.blockFromValues(blockValues.stream().map(MultiValueBlockTests::mapToList).toList(), ElementType.INT), + TestBlockBuilder.blockFromSingleValues(blockValues, ElementType.LONG), + TestBlockBuilder.blockFromValues(blockValues.stream().map(MultiValueBlockTests::mapToList).toList(), ElementType.LONG), + TestBlockBuilder.blockFromSingleValues(blockValues, ElementType.DOUBLE), + TestBlockBuilder.blockFromValues(blockValues.stream().map(MultiValueBlockTests::mapToList).toList(), ElementType.DOUBLE), + TestBlockBuilder.blockFromSingleValues(blockValues, ElementType.BYTES_REF), + TestBlockBuilder.blockFromValues(blockValues.stream().map(MultiValueBlockTests::mapToList).toList(), ElementType.BYTES_REF) + ); + for (Block block : blocks) { + assertThat(block.asVector(), is(nullValue())); + BlockValueAsserter.assertBlockValues(block, blockValues.stream().map(MultiValueBlockTests::mapToList).toList()); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(block, unused -> serializeDeserializeBlock(block)); + } + } + + // Returns a list containing the given obj, or an empty list if obj is null + static List<Object> mapToList(Object obj) { + if (obj == null) { + return List.of(); + } else { + return List.of(obj); + } + } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/TestBlockBuilder.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/TestBlockBuilder.java index 37e43571bc9a5..4684da93a661a 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/TestBlockBuilder.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/data/TestBlockBuilder.java @@ -44,6 +44,20 @@ public static Block blockFromValues(List<List<Object>> blockValues, ElementType return builder.build(); } + // Builds a block of single values. Each value can be null or non-null.
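+ // Nulls are appended via appendNull rather than as empty position entries.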
+ // Differs from blockFromValues, as it does not use begin/endPositionEntry + public static Block blockFromSingleValues(List<Object> blockValues, ElementType elementType) { + TestBlockBuilder builder = builderOf(elementType); + for (Object rowValue : blockValues) { + if (rowValue == null) { + builder.appendNull(); + } else { + builder.appendObject(rowValue); + } + } + return builder.build(); + } + static TestBlockBuilder builderOf(ElementType type) { return switch (type) { case INT -> new TestIntBlockBuilder(0); @@ -305,6 +319,9 @@ private static class TestBooleanBlockBuilder extends TestBlockBuilder { @Override public TestBlockBuilder appendObject(Object object) { + if (object instanceof Number number) { + object = number.intValue() % 2 == 0; + } builder.appendBoolean((boolean) object); return this; } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java index 149ea1c216e52..be6a4a3cd19fb 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorStatusTests.java @@ -21,7 +21,7 @@ public static LuceneSourceOperator.Status simple() { public static String simpleToJson() { return """ - {"current_leaf":0,"total_leaves":1,"leaf_position":123,"leaf_size":99990,"pages_emitted":5}"""; + {"processed_slices":0,"total_slices":1,"slice_position":123,"slice_size":99990,"pages_emitted":5}"""; } public void testToXContent() { diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorTests.java index 35ac2f588a3ee..f3eef4ea45f90 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneSourceOperatorTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.index.IndexReader; import org.apache.lucene.index.IndexableField; import org.apache.lucene.index.NoMergePolicy; +import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.search.Query; import org.apache.lucene.store.Directory; @@ -26,6 +27,7 @@ import org.elasticsearch.compute.operator.OperatorTestCase; import org.elasticsearch.compute.operator.PageConsumerOperator; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.cache.query.TrivialQueryCachingPolicy; import org.elasticsearch.index.fielddata.FieldDataContext; import org.elasticsearch.index.fielddata.IndexFieldData; import org.elasticsearch.index.fielddata.IndexFieldDataCache; @@ -35,12 +37,12 @@ import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.index.query.support.NestedScope; import org.elasticsearch.search.aggregations.support.CoreValuesSourceType; +import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.search.internal.SearchContext; -import org.elasticsearch.search.sort.FieldSortBuilder; -import org.elasticsearch.search.sort.SortBuilder; import org.junit.After; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.ArrayList; import java.util.List; import java.util.function.Function; @@
-66,17 +68,12 @@ public void closeIndex() throws IOException { } @Override - protected LuceneSourceOperator.LuceneSourceOperatorFactory simple(BigArrays bigArrays) { - return simple(bigArrays, DataPartitioning.SHARD, 10_000, 100); + protected LuceneSourceOperator.Factory simple(BigArrays bigArrays) { + return simple(bigArrays, randomFrom(DataPartitioning.values()), between(1, 10_000), 100); } - private LuceneSourceOperator.LuceneSourceOperatorFactory simple( - BigArrays bigArrays, - DataPartitioning dataPartitioning, - int size, - int limit - ) { - int commitEvery = Math.max(1, size / 10); + private LuceneSourceOperator.Factory simple(BigArrays bigArrays, DataPartitioning dataPartitioning, int numDocs, int limit) { + int commitEvery = Math.max(1, numDocs / 10); try ( RandomIndexWriter writer = new RandomIndexWriter( random(), @@ -84,7 +81,7 @@ private LuceneSourceOperator.LuceneSourceOperatorFactory simple( newIndexWriterConfig().setMergePolicy(NoMergePolicy.INSTANCE) ) ) { - for (int d = 0; d < size; d++) { + for (int d = 0; d < numDocs; d++) { List<IndexableField> doc = new ArrayList<>(); doc.add(new SortedNumericDocValuesField("s", d)); writer.addDocument(doc); @@ -97,7 +94,7 @@ private LuceneSourceOperator.LuceneSourceOperatorFactory simple( throw new RuntimeException(e); } - SearchContext ctx = mock(SearchContext.class); + SearchContext ctx = mockSearchContext(reader); SearchExecutionContext ectx = mock(SearchExecutionContext.class); when(ctx.getSearchExecutionContext()).thenReturn(ectx); when(ectx.getFieldType(anyString())).thenAnswer(inv -> { @@ -116,17 +113,8 @@ private LuceneSourceOperator.LuceneSourceOperatorFactory simple( when(ectx.nestedLookup()).thenReturn(NestedLookup.EMPTY); when(ectx.getIndexReader()).thenReturn(reader); Function<SearchContext, Query> queryFunction = c -> new MatchAllDocsQuery(); - int taskConcurrency = 0; - int maxPageSize = between(10, Math.max(10, size)); - List<SortBuilder<?>> sorts = List.of(new FieldSortBuilder("s")); - return new LuceneSourceOperator.LuceneSourceOperatorFactory( - List.of(ctx), - queryFunction, - dataPartitioning, - taskConcurrency, - maxPageSize, - limit - ); + int maxPageSize = between(10, Math.max(10, numDocs)); + return new LuceneSourceOperator.Factory(List.of(ctx), queryFunction, dataPartitioning, 1, maxPageSize, limit); } @Override @@ -156,7 +144,7 @@ public void testEmpty() { private void testSimple(int size, int limit) { DriverContext ctx = new DriverContext(); - LuceneSourceOperator.LuceneSourceOperatorFactory factory = simple(nonBreakingBigArrays(), DataPartitioning.SHARD, size, limit); + LuceneSourceOperator.Factory factory = simple(nonBreakingBigArrays(), DataPartitioning.SHARD, size, limit); Operator.OperatorFactory readS = ValuesSourceReaderOperatorTests.factory( reader, CoreValuesSourceType.NUMERIC, @@ -171,7 +159,7 @@ private void testSimple(int size, int limit) { OperatorTestCase.assertDriverContext(ctx); for (Page page : results) { - assertThat(page.getPositionCount(), lessThanOrEqualTo(factory.maxPageSize)); + assertThat(page.getPositionCount(), lessThanOrEqualTo(factory.maxPageSize())); } for (Page page : results) { @@ -181,7 +169,28 @@ private void testSimple(int size, int limit) { } } int maxPages = Math.min(size, limit); - int minPages = (int) Math.ceil(maxPages / factory.maxPageSize); + int minPages = (int) Math.ceil(maxPages / factory.maxPageSize()); assertThat(results, hasSize(both(greaterThanOrEqualTo(minPages)).and(lessThanOrEqualTo(maxPages)))); } + + /** + * Creates a mock search context with the given index reader.
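+ * The searcher is a {@link ContextIndexSearcher} that never caches queries.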
+ * The returned mock search context can be used to test with {@link LuceneOperator}. + */ + public static SearchContext mockSearchContext(IndexReader reader) { + try { + ContextIndexSearcher searcher = new ContextIndexSearcher( + reader, + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + TrivialQueryCachingPolicy.NEVER, + true + ); + SearchContext searchContext = mock(SearchContext.class); + when(searchContext.searcher()).thenReturn(searcher); + return searchContext; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperatorTests.java index 7c732ec121ac3..7abf042fa851f 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/LuceneTopNSourceOperatorTests.java @@ -63,16 +63,11 @@ public void closeIndex() throws IOException { } @Override - protected LuceneTopNSourceOperator.LuceneTopNSourceOperatorFactory simple(BigArrays bigArrays) { + protected LuceneTopNSourceOperator.Factory simple(BigArrays bigArrays) { return simple(bigArrays, DataPartitioning.SHARD, 10_000, 100); } - private LuceneTopNSourceOperator.LuceneTopNSourceOperatorFactory simple( - BigArrays bigArrays, - DataPartitioning dataPartitioning, - int size, - int limit - ) { + private LuceneTopNSourceOperator.Factory simple(BigArrays bigArrays, DataPartitioning dataPartitioning, int size, int limit) { int commitEvery = Math.max(1, size / 10); try ( RandomIndexWriter writer = new RandomIndexWriter( @@ -94,7 +89,7 @@ private LuceneTopNSourceOperator.LuceneTopNSourceOperatorFactory simple( throw new RuntimeException(e); } - SearchContext ctx = mock(SearchContext.class); + SearchContext ctx = LuceneSourceOperatorTests.mockSearchContext(reader); SearchExecutionContext ectx = mock(SearchExecutionContext.class); when(ctx.getSearchExecutionContext()).thenReturn(ectx); when(ectx.getFieldType(anyString())).thenAnswer(inv -> { @@ -116,7 +111,7 @@ private LuceneTopNSourceOperator.LuceneTopNSourceOperatorFactory simple( int taskConcurrency = 0; int maxPageSize = between(10, Math.max(10, size)); List<SortBuilder<?>> sorts = List.of(new FieldSortBuilder("s")); - return new LuceneTopNSourceOperator.LuceneTopNSourceOperatorFactory( + return new LuceneTopNSourceOperator.Factory( List.of(ctx), queryFunction, dataPartitioning, @@ -154,12 +149,7 @@ public void testEmpty() { private void testSimple(int size, int limit) { DriverContext ctx = new DriverContext(); - LuceneTopNSourceOperator.LuceneTopNSourceOperatorFactory factory = simple( - nonBreakingBigArrays(), - DataPartitioning.SHARD, - size, - limit - ); + LuceneTopNSourceOperator.Factory factory = simple(nonBreakingBigArrays(), DataPartitioning.SHARD, size, limit); Operator.OperatorFactory readS = ValuesSourceReaderOperatorTests.factory( reader, CoreValuesSourceType.NUMERIC, @@ -175,17 +165,17 @@ private void testSimple(int size, int limit) { long expectedS = 0; for (Page page : results) { - if (limit - expectedS < factory.maxPageSize) { + if (limit - expectedS < factory.maxPageSize()) { assertThat(page.getPositionCount(), equalTo((int) (limit - expectedS))); } else { - assertThat(page.getPositionCount(), equalTo(factory.maxPageSize)); + assertThat(page.getPositionCount(), equalTo(factory.maxPageSize()));
} LongBlock sBlock = page.getBlock(1); for (int p = 0; p < page.getPositionCount(); p++) { assertThat(sBlock.getLong(sBlock.getFirstValueIndex(p)), equalTo(expectedS++)); } } - int pages = (int) Math.ceil((float) Math.min(size, limit) / factory.maxPageSize); + int pages = (int) Math.ceil((float) Math.min(size, limit) / factory.maxPageSize()); assertThat(results, hasSize(pages)); } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java index b30f1a5c27eff..c2c8c9e05c064 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/lucene/ValuesSourceReaderOperatorTests.java @@ -18,6 +18,8 @@ import org.apache.lucene.search.MatchAllDocsQuery; import org.apache.lucene.store.Directory; import org.apache.lucene.tests.index.RandomIndexWriter; +import org.apache.lucene.tests.mockfile.HandleLimitFS; +import org.apache.lucene.tests.util.LuceneTestCase; import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.NumericUtils; import org.elasticsearch.common.Randomness; @@ -63,9 +65,17 @@ import java.util.List; import java.util.stream.IntStream; +import static org.elasticsearch.compute.lucene.LuceneSourceOperatorTests.mockSearchContext; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; +/** + * Tests for {@link ValuesSourceReaderOperator}. Turns off {@link HandleLimitFS} + * because this test can create a large number of documents and does not allow + * Lucene to merge them. It intentionally creates many files to make sure + * that {@link ValuesSourceReaderOperator} works with them.
+ */ +@LuceneTestCase.SuppressFileSystems(value = "HandleLimitFS") public class ValuesSourceReaderOperatorTests extends OperatorTestCase { private static final String[] PREFIX = new String[] { "a", "b", "c" }; private static final boolean[][] BOOLEANS = new boolean[][] { @@ -142,7 +152,15 @@ protected SourceOperator simpleInput(int size) { } catch (IOException e) { throw new RuntimeException(e); } - return new LuceneSourceOperator(reader, 0, new MatchAllDocsQuery(), OperatorTestCase.randomPageSize(), LuceneOperator.NO_LIMIT); + var luceneFactory = new LuceneSourceOperator.Factory( + List.of(mockSearchContext(reader)), + ctx -> new MatchAllDocsQuery(), + randomFrom(DataPartitioning.values()), + randomIntBetween(1, 10), + randomPageSize(), + LuceneOperator.NO_LIMIT + ); + return luceneFactory.get(new DriverContext()); } @Override @@ -373,10 +391,18 @@ public void testValuesSourceReaderOperatorWithNulls() throws IOException { } DriverContext driverContext = new DriverContext(); + var luceneFactory = new LuceneSourceOperator.Factory( + List.of(mockSearchContext(reader)), + ctx -> new MatchAllDocsQuery(), + randomFrom(DataPartitioning.values()), + randomIntBetween(1, 10), + randomPageSize(), + LuceneOperator.NO_LIMIT + ); try ( Driver driver = new Driver( driverContext, - new LuceneSourceOperator(reader, 0, new MatchAllDocsQuery(), randomPageSize(), LuceneOperator.NO_LIMIT), + luceneFactory.get(driverContext), List.of( factory(reader, CoreValuesSourceType.NUMERIC, ElementType.INT, intFt).get(driverContext), factory(reader, CoreValuesSourceType.NUMERIC, ElementType.LONG, longFt).get(driverContext), diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java index 2e5153421b78f..48911c208bdc9 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/AsyncOperatorTests.java @@ -202,7 +202,7 @@ protected void doRun() { } }; TimeValue delay = TimeValue.timeValueMillis(randomIntBetween(0, 50)); - threadPool.schedule(command, delay, ESQL_TEST_EXECUTOR); + threadPool.schedule(command, delay, threadPool.executor(ESQL_TEST_EXECUTOR)); } } } diff --git a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java index a5acd4488a368..5024c28d86a91 100644 --- a/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java +++ b/x-pack/plugin/esql/compute/src/test/java/org/elasticsearch/compute/operator/ForkingOperatorTestCase.java @@ -123,6 +123,7 @@ public final void testInitialIntermediateFinal() { assertDriverContext(driverContext); } + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/99160") public final void testManyInitialManyPartialFinal() { BigArrays bigArrays = nonBreakingBigArrays(); DriverContext driverContext = new DriverContext(); diff --git a/x-pack/plugin/esql/qa/server/multi-node/build.gradle b/x-pack/plugin/esql/qa/server/multi-node/build.gradle new file mode 100644 index 0000000000000..1b62fdea2671c --- /dev/null +++ b/x-pack/plugin/esql/qa/server/multi-node/build.gradle @@ -0,0 +1,19 @@ +apply plugin: 'elasticsearch.legacy-yaml-rest-test' + +dependencies { + javaRestTestImplementation 
project(xpackModule('esql:qa:testFixtures')) +} + +restResources { + restApi { + include '_common', 'bulk', 'indices', 'esql', 'xpack', 'enrich' + } +} + +testClusters.configureEach { + numberOfNodes = 2 + testDistribution = 'DEFAULT' + setting 'xpack.license.self_generated.type', 'trial' + setting 'xpack.monitoring.collection.enabled', 'true' + setting 'xpack.security.enabled', 'false' +} diff --git a/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java new file mode 100644 index 0000000000000..eab26b565f93d --- /dev/null +++ b/x-pack/plugin/esql/qa/server/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/esql/qa/multi_node/EsqlSpecIT.java @@ -0,0 +1,17 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.qa.multi_node; + +import org.elasticsearch.xpack.esql.qa.rest.EsqlSpecTestCase; +import org.elasticsearch.xpack.ql.CsvSpecReader.CsvTestCase; + +public class EsqlSpecIT extends EsqlSpecTestCase { + public EsqlSpecIT(String fileName, String groupName, String testName, Integer lineNumber, CsvTestCase testCase) { + super(fileName, groupName, testName, lineNumber, testCase); + } +} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml index 9716004c3fbc1..fdd5cf2566961 100644 --- a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/10_basic.yml @@ -297,3 +297,29 @@ setup: - match: {columns.2.type: "keyword"} - length: {values: 1} - match: {values.0: [1, 44, "green"]} + +--- +"Test Mixed Input Params": + - do: + esql.query: + body: + query: 'from test | eval x = ?, y = ?, z = ?, t = ?, u = ?, v = ? 
| keep x, y, z, t, u, v | limit 3' + params: [{"value": 1, "type": "keyword"}, {"value": 2, "type": "double"}, null, true, 123, {"value": 123, "type": "long"}] + + - length: {columns: 6} + - match: {columns.0.name: "x"} + - match: {columns.0.type: "keyword"} + - match: {columns.1.name: "y"} + - match: {columns.1.type: "double"} + - match: {columns.2.name: "z"} + - match: {columns.2.type: "null"} + - match: {columns.3.name: "t"} + - match: {columns.3.type: "boolean"} + - match: {columns.4.name: "u"} + - match: {columns.4.type: "integer"} + - match: {columns.5.name: "v"} + - match: {columns.5.type: "long"} + - length: {values: 3} + - match: {values.0: ["1",2.0,null,true,123,123]} + - match: {values.1: ["1",2.0,null,true,123,123]} + - match: {values.2: ["1",2.0,null,true,123,123]} diff --git a/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/80_text.yml b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/80_text.yml new file mode 100644 index 0000000000000..4103bee7e290f --- /dev/null +++ b/x-pack/plugin/esql/qa/server/single-node/src/yamlRestTest/resources/rest-api-spec/test/80_text.yml @@ -0,0 +1,408 @@ +--- +setup: + + - do: + indices.create: + index: test + body: + mappings: + properties: + "emp_no": + type: long + name: + type: keyword + job: + type: text + fields: + raw: + type: keyword + tag: + type: text + + - do: + bulk: + index: test + refresh: true + body: + - { "index": { } } + - { "emp_no": 10, "name": "Jenny", "job": "IT Director", "tag": "foo bar" } + - { "index": { } } + - { "emp_no": 20, "name": "John", "job": "Payroll Specialist", "tag": "baz" } + +--- +"all": + - do: + esql.query: + body: + query: 'from test | sort emp_no' + + - match: { columns.0.name: "emp_no" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "job" } + - match: { columns.1.type: "text" } + - match: { columns.2.name: "job.raw" } + - match: { columns.2.type: "keyword" } + - match: { columns.3.name: "name" } + - match: { columns.3.type: "keyword" } + - match: { columns.4.name: "tag" } + - match: { columns.4.type: "text" } + + - length: { values: 2 } + - match: { values.0: [ 10, "IT Director", "IT Director", "Jenny", "foo bar"] } + - match: { values.1: [ 20, "Payroll Specialist", "Payroll Specialist", "John", "baz"] } + + +--- +"filter by text": + - do: + esql.query: + body: + query: 'from test | where tag == "baz" | keep emp_no, name, job, tag' + + - match: { columns.0.name: "emp_no" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "name" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "job" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "tag" } + - match: { columns.3.type: "text" } + + - length: { values: 1 } + - match: { values.0: [ 20, "John", "Payroll Specialist", "baz"] } + +--- +"like by text": + - do: + esql.query: + body: + query: 'from test | where tag LIKE "*az" | keep emp_no, name, job, tag' + + - match: { columns.0.name: "emp_no" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "name" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "job" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "tag" } + - match: { columns.3.type: "text" } + + - length: { values: 1 } + - match: { values.0: [ 20, "John", "Payroll Specialist", "baz"] } + +--- +"rlike by text": + - do: + esql.query: + body: + query: 'from test | where tag RLIKE ".*az" | keep emp_no, name, job, tag' + + - match: { 
columns.0.name: "emp_no" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "name" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "job" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "tag" } + - match: { columns.3.type: "text" } + + - length: { values: 1 } + - match: { values.0: [ 20, "John", "Payroll Specialist", "baz"] } + +--- +"eval and filter text": + - do: + esql.query: + body: + query: 'from test | eval x = tag | where x == "baz" | keep emp_no, name, job, x' + + - match: { columns.0.name: "emp_no" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "name" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "job" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "x" } + - match: { columns.3.type: "text" } + + - length: { values: 1 } + - match: { values.0: [ 20, "John", "Payroll Specialist", "baz"] } + +--- +"filter on text multi-field": + - do: + esql.query: + body: + query: 'from test | where job == "IT Director" | keep emp_no, name, job, tag' + + - match: { columns.0.name: "emp_no" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "name" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "job" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "tag" } + - match: { columns.3.type: "text" } + + - length: { values: 1 } + - match: { values.0: [ 10, "Jenny", "IT Director", "foo bar"] } + +--- +"like by multi-field text": + - do: + esql.query: + body: + query: 'from test | where job LIKE "*Specialist" | keep emp_no, name, job, tag' + + - match: { columns.0.name: "emp_no" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "name" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "job" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "tag" } + - match: { columns.3.type: "text" } + + - length: { values: 1 } + - match: { values.0: [ 20, "John", "Payroll Specialist", "baz"] } + +--- +"rlike by multi-field text": + - do: + esql.query: + body: + query: 'from test | where job RLIKE ".*Specialist" | keep emp_no, name, job, tag' + + - match: { columns.0.name: "emp_no" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "name" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "job" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "tag" } + - match: { columns.3.type: "text" } + + - length: { values: 1 } + - match: { values.0: [ 20, "John", "Payroll Specialist", "baz"] } + + +--- +"sort by text": + - do: + esql.query: + body: + query: 'from test | sort tag | keep emp_no, name, job, tag' + + - match: { columns.0.name: "emp_no" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "name" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "job" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "tag" } + - match: { columns.3.type: "text" } + + - length: { values: 2 } + - match: { values.0: [ 20, "John", "Payroll Specialist", "baz"] } + - match: { values.1: [ 10, "Jenny", "IT Director", "foo bar"] } + + +--- +"sort by text multi-field": + - do: + esql.query: + body: + query: 'from test | sort job | keep emp_no, name, job, tag' + + - match: { columns.0.name: "emp_no" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "name" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "job" } + - match: { columns.2.type: "text" } + - 
match: { columns.3.name: "tag" } + - match: { columns.3.type: "text" } + + - length: { values: 2 } + - match: { values.0: [ 10, "Jenny", "IT Director", "foo bar"] } + - match: { values.1: [ 20, "John", "Payroll Specialist", "baz"] } + +--- +"sort by text multi-field desc": + - do: + esql.query: + body: + query: 'from test | sort job desc | keep emp_no, name, job, tag' + + - match: { columns.0.name: "emp_no" } + - match: { columns.0.type: "long" } + - match: { columns.1.name: "name" } + - match: { columns.1.type: "keyword" } + - match: { columns.2.name: "job" } + - match: { columns.2.type: "text" } + - match: { columns.3.name: "tag" } + - match: { columns.3.type: "text" } + + - length: { values: 2 } + - match: { values.0: [ 20, "John", "Payroll Specialist", "baz"] } + - match: { values.1: [ 10, "Jenny", "IT Director", "foo bar"] } + + +--- +"text in functions": + - do: + esql.query: + body: + query: 'from test | sort name | eval description = concat(name, " - ", job) | keep description' + + - match: { columns.0.name: "description" } + - match: { columns.0.type: "keyword" } + + - length: { values: 2 } + - match: { values.0: [ "Jenny - IT Director"] } + - match: { values.1: [ "John - Payroll Specialist"] } + + +--- +"stats text with raw": + - do: + esql.query: + body: + query: 'from test | stats jobs = count(job) | keep jobs' + + - match: { columns.0.name: "jobs" } + - match: { columns.0.type: "long" } + + - length: { values: 1 } + - match: { values.0: [ 2 ] } + + +--- +"stats text no raw": + - do: + esql.query: + body: + query: 'from test | stats tags = count(tag) | keep tags' + + - match: { columns.0.name: "tags" } + - match: { columns.0.type: "long" } + + - length: { values: 1 } + - match: { values.0: [ 2 ] } + + +--- +"stats by text with raw": + - do: + esql.query: + body: + query: 'from test | stats names = count(name) by job | keep names' + + - match: { columns.0.name: "names" } + - match: { columns.0.type: "long" } + + - length: { values: 2 } + - match: { values.0: [ 1 ] } + - match: { values.1: [ 1 ] } + + +--- +"stats by text no raw": + - do: + esql.query: + body: + query: 'from test | stats names = count(name) by tag | keep names' + + - match: { columns.0.name: "names" } + - match: { columns.0.type: "long" } + + - length: { values: 2 } + - match: { values.0: [ 1 ] } + - match: { values.1: [ 1 ] } + +--- +"text with synthetic source": + - skip: + version: "all" + reason: "AwaitsFix https://github.com/elastic/elasticsearch/issues/99183" + + - do: + indices.create: + index: test2 + body: + mappings: + _source: + mode: synthetic + properties: + "emp_no": + type: long + name: + type: keyword + job: + type: text + fields: + raw: + type: keyword + + - do: + bulk: + index: test2 + refresh: true + body: + - { "index": { } } + - { "emp_no": 10, "name": "Jenny", "job": "IT Director" } + - { "index": { } } + - { "emp_no": 20, "name": "John", "job": "Payroll Specialist" } + + - do: + esql.query: + body: + query: 'from test2 | sort emp_no | keep job' + + - match: { columns.0.name: "job" } + - match: { columns.0.type: "text" } + + - length: { values: 2 } + - match: { values.0.0: "IT Director" } + - match: { values.1.0: "Payroll Specialist" } + + +--- +"stored text with synthetic source": + - do: + indices.create: + index: test2 + body: + mappings: + _source: + mode: synthetic + properties: + "emp_no": + type: long + name: + type: keyword + job: + type: text + store: true + + - do: + bulk: + index: test2 + refresh: true + body: + - { "index": { } } + - { "emp_no": 10, "name": "Jenny", "job": "IT
Director"} + - { "index": { } } + - { "emp_no": 20, "name": "John", "job": "Payroll Specialist" } + + - do: + esql.query: + body: + query: 'from test2 | sort emp_no | keep job' + + - match: { columns.0.name: "job" } + - match: { columns.0.type: "text" } + + - length: { values: 2 } + - match: { values.0.0: "IT Director" } + - match: { values.1.0: "Payroll Specialist" } diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java index 6001fd90e087a..e9dc848024448 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/EsqlSpecTestCase.java @@ -100,7 +100,13 @@ protected final void doTest() throws Throwable { assertNotNull(answer.get("values")); @SuppressWarnings("unchecked") List> actualValues = (List>) answer.get("values"); - assertData(expectedColumnsWithValues, actualValues, LOGGER, value -> value == null ? "null" : value.toString()); + assertData( + expectedColumnsWithValues, + actualValues, + testCase.ignoreOrder, + LOGGER, + value -> value == null ? "null" : value.toString() + ); } private Throwable reworkException(Throwable th) { diff --git a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java index 42bc9872c7e8e..7d90cf47cae09 100644 --- a/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java +++ b/x-pack/plugin/esql/qa/server/src/main/java/org/elasticsearch/xpack/esql/qa/rest/RestEsqlTestCase.java @@ -16,6 +16,7 @@ import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.WarningsHandler; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.io.Streams; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; @@ -75,6 +76,11 @@ public RequestObjectBuilder columnar(boolean columnar) throws IOException { return this; } + public RequestObjectBuilder params(String rawParams) throws IOException { + builder.rawField("params", new BytesArray(rawParams).streamInput(), XContentType.JSON); + return this; + } + public RequestObjectBuilder timeZone(ZoneId zoneId) throws IOException { builder.field("time_zone", zoneId); return this; @@ -293,6 +299,59 @@ public void testMetadataFieldsOnMultipleIndices() throws IOException { assertMap(result, matchesMap().entry("columns", columns).entry("values", values)); } + public void testErrorMessageForEmptyParams() throws IOException { + ResponseException re = expectThrows( + ResponseException.class, + () -> runEsql(new RequestObjectBuilder().query("row a = 1 | eval x = ?").params("[]").build()) + ); + assertThat(EntityUtils.toString(re.getResponse().getEntity()), containsString("Not enough actual parameters 0")); + } + + public void testErrorMessageForInvalidParams() throws IOException { + ResponseException re = expectThrows( + ResponseException.class, + () -> runEsql(new RequestObjectBuilder().query("row a = 1").params("[{\"x\":\"y\"}]").build()) + ); + assertThat(EntityUtils.toString(re.getResponse().getEntity()), containsString("Required [value, type]")); + } + + public void 
+ + public void testErrorMessageForMissingTypeInParams() throws IOException { + ResponseException re = expectThrows( + ResponseException.class, + () -> runEsql(new RequestObjectBuilder().query("row a = 1").params("[\"x\", 123, true, {\"value\": \"y\"}]").build()) + ); + assertThat(EntityUtils.toString(re.getResponse().getEntity()), containsString("Required [type]")); + } + + public void testErrorMessageForMissingValueInParams() throws IOException { + ResponseException re = expectThrows( + ResponseException.class, + () -> runEsql(new RequestObjectBuilder().query("row a = 1").params("[\"x\", 123, true, {\"type\": \"y\"}]").build()) + ); + assertThat(EntityUtils.toString(re.getResponse().getEntity()), containsString("Required [value]")); + } + + public void testErrorMessageForInvalidTypeInParams() throws IOException { + ResponseException re = expectThrows( + ResponseException.class, + () -> runEsql(new RequestObjectBuilder().query("row a = 1 | eval x = ?").params("[{\"type\": \"byte\", \"value\": 5}]").build()) + ); + assertThat(EntityUtils.toString(re.getResponse().getEntity()), containsString("illegal data type [byte]")); + } + + public void testErrorMessageForArrayValuesInParams() throws IOException { + ResponseException re = expectThrows( + ResponseException.class, + () -> runEsql( + new RequestObjectBuilder().query("row a = 1 | eval x = ?").params("[{\"type\": \"integer\", \"value\": [5, 6, 7]}]").build() + ) + ); + assertThat( + EntityUtils.toString(re.getResponse().getEntity()), + containsString("[params] value doesn't support values of type: START_ARRAY") + ); + } + private static String expectedTextBody(String format, int count, @Nullable Character csvDelimiter) { StringBuilder sb = new StringBuilder(); switch (format) {
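Taken together, the failure cases above pin down the accepted `params` grammar: the array needs one entry per `?` placeholder, and each entry is either a bare constant or an object carrying both `type` and `value`, with single values only. As a purely illustrative counterpart (an assumption-labeled sketch against the same endpoint, not part of the diff), a request that should pass that validation might look like this:

    import org.apache.http.HttpHost;
    import org.apache.http.util.EntityUtils;
    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.RestClient;

    public class EsqlParamsExample {
        public static void main(String[] args) throws Exception {
            try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200)).build()) {
                Request request = new Request("POST", "/_query");
                // One entry per "?": either a bare constant or an object with
                // both "type" and "value" (single values only; arrays and the
                // byte type are rejected, per the error-message tests above).
                request.setJsonEntity("""
                    {
                      "query": "row a = 1 | eval x = ?, y = ?",
                      "params": [{"type": "integer", "value": 5}, "baz"]
                    }
                    """);
                System.out.println(EntityUtils.toString(client.performRequest(request).getEntity()));
            }
        }
    }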
diff --git a/x-pack/plugin/esql/qa/testFixtures/build.gradle b/x-pack/plugin/esql/qa/testFixtures/build.gradle index e94ebf5f5b08c..d313d615a1a42 100644 --- a/x-pack/plugin/esql/qa/testFixtures/build.gradle +++ b/x-pack/plugin/esql/qa/testFixtures/build.gradle @@ -12,3 +12,21 @@ dependencies { implementation project(':server') api "net.sf.supercsv:super-csv:${versions.supercsv}" } + +/** + * Runs the CSV spec tests data loader to load data into a running stand-alone instance. + * Accepts a URL as its first argument, e.g. http://localhost:9200 or http://user:pass@localhost:9200 + * + * e.g. + * ./gradlew :x-pack:plugin:esql:qa:testFixtures:loadCsvSpecData --args="http://elastic-admin:elastic-password@localhost:9200" + * + * If no argument is specified, the default URL is http://localhost:9200 without authentication. + * HTTPS is also supported. + */ +task loadCsvSpecData(type: JavaExec) { + group = "Execution" + description = "Loads ESQL CSV Spec Tests data on a running stand-alone instance" + classpath = sourceSets.main.runtimeClasspath + mainClass = "org.elasticsearch.xpack.esql.CsvTestsDataLoader" +} + diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java index dba1d29656c95..994dd2b99852d 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvAssert.java @@ -16,6 +16,8 @@ import org.hamcrest.Matchers; import java.util.ArrayList; +import java.util.Comparator; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.function.Function; @@ -36,9 +38,9 @@ public final class CsvAssert { private CsvAssert() {} - static void assertResults(ExpectedResults expected, ActualResults actual, Logger logger) { + static void assertResults(ExpectedResults expected, ActualResults actual, boolean ignoreOrder, Logger logger) { assertMetadata(expected, actual, logger); - assertData(expected, actual, logger); + assertData(expected, actual, ignoreOrder, logger); } static void assertMetadata(ExpectedResults expected, ActualResults actual, Logger logger) { @@ -146,16 +148,31 @@ private static void assertMetadata( } } - static void assertData(ExpectedResults expected, ActualResults actual, Logger logger) { - assertData(expected, actual.values(), logger, Function.identity()); + static void assertData(ExpectedResults expected, ActualResults actual, boolean ignoreOrder, Logger logger) { + assertData(expected, actual.values(), ignoreOrder, logger, Function.identity()); + } + + public static void assertData( + ExpectedResults expected, + Iterator<Iterator<Object>> actualValuesIterator, + boolean ignoreOrder, + Logger logger, + Function<Object, String> valueTransformer + ) { + assertData(expected, EsqlTestUtils.getValuesList(actualValuesIterator), ignoreOrder, logger, valueTransformer); } public static void assertData( ExpectedResults expected, List<List<Object>> actualValues, + boolean ignoreOrder, Logger logger, Function<Object, String> valueTransformer ) { + if (ignoreOrder) { + expected.values().sort(resultRowComparator(expected.columnTypes())); + actualValues.sort(resultRowComparator(expected.columnTypes())); + } var expectedValues = expected.values(); for (int row = 0; row < expectedValues.size(); row++) { @@ -210,6 +227,35 @@ public static void assertData( } } + private static Comparator<List<Object>> resultRowComparator(List<Type> types) { + return (x, y) -> { + for (int i = 0; i < x.size(); i++) { + Object left = x.get(i); + if (left instanceof List<?> l) { + left = l.isEmpty() ? null : l.get(0); + } + Object right = y.get(i); + if (right instanceof List<?> r) { + right = r.isEmpty() ? null : r.get(0); + } + if (left == null && right == null) { + continue; + } + if (left == null) { + return 1; + } + if (right == null) { + return -1; + } + int result = types.get(i).comparator().compare(left, right); + if (result != 0) { + return result; + } + } + return 0; + }; + } + private static Object rebuildExpected(Object expectedValue, Class<?> clazz, Function<Object, Object> mapper) { if (List.class.isAssignableFrom(expectedValue.getClass())) { assertThat(((List<?>) expectedValue).get(0), instanceOf(clazz));
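The sorting above is the heart of the new `ignoreOrder` mode: both the expected and the actual rows are sorted with the same column-wise comparator before the row-by-row comparison, so result sets that differ only in row order compare equal, while any difference in row content still fails. Below is a stripped-down, self-contained sketch of that idea using plain JDK types; natural string ordering stands in for the per-`Type` comparators of the real code, and it is not the CsvAssert implementation itself.

    import java.util.ArrayList;
    import java.util.Comparator;
    import java.util.List;

    public class OrderInsensitiveCompare {
        // Compare rows column by column, nulls sorted last, mirroring the
        // resultRowComparator above in spirit (simplified to strings).
        static final Comparator<List<String>> ROW_ORDER = (x, y) -> {
            for (int i = 0; i < x.size(); i++) {
                String l = x.get(i), r = y.get(i);
                if (l == null && r == null) continue;
                if (l == null) return 1;
                if (r == null) return -1;
                int c = l.compareTo(r);
                if (c != 0) return c;
            }
            return 0;
        };

        static boolean sameRowsIgnoringOrder(List<List<String>> expected, List<List<String>> actual) {
            var e = new ArrayList<>(expected);
            var a = new ArrayList<>(actual);
            e.sort(ROW_ORDER); // sort both sides with the same comparator ...
            a.sort(ROW_ORDER); // ... so only row content matters, not order
            return e.equals(a);
        }

        public static void main(String[] args) {
            var expected = List.of(List.of("10001", "true"), List.of("10003", "false"));
            var actual = List.of(List.of("10003", "false"), List.of("10001", "true"));
            System.out.println(sameRowsIgnoringOrder(expected, actual)); // true
        }
    }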
diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java index f1baa3bbfd31d..7fafa794653e5 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/CsvTestUtils.java @@ -32,7 +32,9 @@ import java.net.URL; import java.util.ArrayList; import java.util.Arrays; +import java.util.Comparator; import java.util.HashMap; +import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; @@ -308,10 +310,20 @@ public enum Type { SCALED_FLOAT(s -> s == null ? null : scaledFloat(s, "100"), Double.class), KEYWORD(Object::toString, BytesRef.class), TEXT(Object::toString, BytesRef.class), - IP(StringUtils::parseIP, BytesRef.class), + IP( + StringUtils::parseIP, + (l, r) -> l instanceof String maybeIP + ? StringUtils.parseIP(maybeIP).compareTo(StringUtils.parseIP(String.valueOf(r))) + : ((BytesRef) l).compareTo((BytesRef) r), + BytesRef.class + ), VERSION(v -> new Version(v).toBytesRef(), BytesRef.class), NULL(s -> null, Void.class), - DATETIME(x -> x == null ? null : DateFormatters.from(UTC_DATE_TIME_FORMATTER.parse(x)).toInstant().toEpochMilli(), Long.class), + DATETIME( + x -> x == null ? null : DateFormatters.from(UTC_DATE_TIME_FORMATTER.parse(x)).toInstant().toEpochMilli(), + (l, r) -> l instanceof Long maybeLong ? maybeLong.compareTo((Long) r) : l.toString().compareTo(r.toString()), + Long.class + ), BOOLEAN(Booleans::parseBoolean, Boolean.class); private static final Map<String, Type> LOOKUP = new HashMap<>(); @@ -341,9 +353,20 @@ public enum Type { private final Function<String, Object> converter; private final Class<?> clazz; + private final Comparator<Object> comparator; + @SuppressWarnings("unchecked") Type(Function<String, Object> converter, Class<?> clazz) { + this( + converter, + Comparable.class.isAssignableFrom(clazz) ? (a, b) -> ((Comparable) a).compareTo(b) : Comparator.comparing(Object::toString), + clazz + ); + }
+ + Type(Function<String, Object> converter, Comparator<Object> comparator, Class<?> clazz) { this.converter = converter; + this.comparator = comparator; this.clazz = clazz; } @@ -374,6 +397,10 @@ Object convert(String value) { Class<?> clazz() { return clazz; } + + public Comparator<Object> comparator() { + return comparator; + } } record ActualResults( @@ -383,7 +410,7 @@ record ActualResults( List<Page> pages, Map<String, List<String>> responseHeaders ) { - List<List<Object>> values() { + Iterator<Iterator<Object>> values() { return EsqlQueryResponse.pagesToValues(dataTypes(), pages); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java similarity index 86% rename from x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java rename to x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java index a9a5e411c0298..dd86742785d15 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/java/org/elasticsearch/xpack/esql/EsqlTestUtils.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.compute.data.BlockUtils; +import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import org.elasticsearch.xpack.esql.plan.logical.local.LocalRelation; import org.elasticsearch.xpack.esql.plan.logical.local.LocalSupplier; @@ -28,6 +29,8 @@ import org.elasticsearch.xpack.ql.type.TypesTests; import org.junit.Assert; +import java.util.ArrayList; +import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; @@ -124,4 +127,18 @@ public boolean exists(String field) { } }; } + + public static List<List<Object>> getValuesList(EsqlQueryResponse results) { + return getValuesList(results.values()); + } + + public static List<List<Object>> getValuesList(Iterator<Iterator<Object>> values) { + var valuesList = new ArrayList<List<Object>>(); + values.forEachRemaining(row -> { + var rowValues = new ArrayList<>(); + row.forEachRemaining(rowValues::add); + valuesList.add(rowValues); + }); + return valuesList; + } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec index 4ecd91ad1063a..ec94d0a849043 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/boolean.csv-spec @@ -49,7 +49,7 @@ avg(salary):double | always_false:boolean in from employees | keep emp_no, is_rehired, still_hired | where is_rehired in (still_hired, true) | where is_rehired != still_hired; - +ignoreOrder:true emp_no:integer |is_rehired:boolean |still_hired:boolean 10021 |true |false 10029 |true |false @@ -150,7 +150,7 @@ tf:boolean |tt:boolean |ff:boolean |ttff:boolean ; convertFromString -from employees | keep emp_no, is_rehired, first_name | eval rehired_str = to_string(is_rehired) | eval rehired_bool = to_boolean(rehired_str) | eval all_false = to_boolean(first_name) | drop first_name | limit 5; +from employees | sort emp_no | keep emp_no, is_rehired, first_name | eval rehired_str = to_string(is_rehired) | eval rehired_bool = to_boolean(rehired_str) | eval all_false = to_boolean(first_name) | drop first_name | limit 5; emp_no:integer 
|is_rehired:boolean |rehired_str:keyword |rehired_bool:boolean |all_false:boolean 10001 |[false, true] |[false, true] |[false, true] |false 10002 |[false, false] |[false, false] |[false, false] |false @@ -174,7 +174,7 @@ str:keyword | bool:boolean convertFromDouble from employees | eval h_2 = height - 2.0, double2bool = to_boolean(h_2) | where emp_no in (10036, 10037, 10038) | keep emp_no, height, *2bool; - +ignoreOrder:true emp_no:integer |height:double |double2bool:boolean 10036 |1.61 |true 10037 |2.0 |false @@ -189,7 +189,7 @@ row ul = [9223372036854775808, 9223372036854775807, 1, 0] | eval bool = to_bool( ; convertFromIntAndLong -from employees | keep emp_no, salary_change* | eval int2bool = to_boolean(salary_change.int), long2bool = to_boolean(salary_change.long) | limit 10; +from employees | sort emp_no | keep emp_no, salary_change* | eval int2bool = to_boolean(salary_change.int), long2bool = to_boolean(salary_change.long) | limit 10; emp_no:integer |salary_change:double |salary_change.int:integer | salary_change.keyword:keyword |salary_change.long:long |int2bool:boolean |long2bool:boolean 10001 |1.19 |1 |1.19 |1 |true |true @@ -207,7 +207,7 @@ emp_no:integer |salary_change:double |salary_change.int:integer | salary_ch // short and byte aren't actually tested, these are loaded as int blocks atm convertFromByteAndShort from employees | eval byte2bool = to_boolean(languages.byte), short2bool = to_boolean(languages.short) | where emp_no in (10019, 10020, 10030) | keep emp_no, languages, *2bool; - +ignoreOrder:true emp_no:integer |languages:integer |byte2bool:boolean |short2bool:boolean 10019 |1 |true |true 10020 |null |null |null diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/comparison.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/comparison.csv-spec index b07259e01ddf2..f5bc5229283db 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/comparison.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/comparison.csv-spec @@ -9,6 +9,7 @@ emp_no:integer longToLong from employees +| sort emp_no | where languages.long < avg_worked_seconds | limit 1 | keep emp_no; @@ -19,6 +20,7 @@ emp_no:integer doubleToDouble from employees +| sort emp_no | where height < 10.0 | limit 1 | keep emp_no; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec index 7ea510ccb8b92..ea53ac5679aa9 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/conditional.csv-spec @@ -1,5 +1,6 @@ twoConditionsWithDefault from employees +| sort emp_no | eval type = case( languages <= 1, "monolingual", languages <= 2, "bilingual", @@ -22,6 +23,7 @@ emp_no:integer | type:keyword singleCondition from employees +| sort emp_no | eval g = case(gender == "F", true) | keep gender, g | limit 10; @@ -41,6 +43,7 @@ null |null conditionIsNull from employees +| sort emp_no | eval g = case( gender == "F", 1, languages > 1, 2, @@ -78,6 +81,7 @@ M |null |3 nullValue from employees +| sort emp_no | eval g = case(gender == "F", 1 + null, 10) | keep gender, g | limit 5; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec index 2e1da37949ad3..e04e870da7713 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec +++ 
b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/date.csv-spec @@ -58,6 +58,7 @@ emp_no:integer | x:keyword | y:keyword compareToString from employees | where hire_date < "1985-03-01T00:00:00Z" | keep emp_no, hire_date; +ignoreOrder:true emp_no:integer | hire_date:date 10009 | 1985-02-18T00:00:00.000Z @@ -149,7 +150,7 @@ y:date | count(emp_no):long ; in -from employees | eval x = date_trunc(1 year, hire_date) | where birth_date not in (x, hire_date) | keep x, hire_date | sort x desc | limit 4; +from employees | eval x = date_trunc(1 year, hire_date) | where birth_date not in (x, hire_date) | keep x, hire_date | sort x desc, hire_date | limit 4; x:date |hire_date:date 1999-01-01T00:00:00.000Z|1999-04-30T00:00:00.000Z @@ -159,7 +160,7 @@ x:date |hire_date:date ; convertFromDatetime -from employees| keep birth_date | eval bd = to_datetime(birth_date) | limit 2; +from employees | sort emp_no | keep birth_date | eval bd = to_datetime(birth_date) | limit 2; birth_date:date |bd:date 1953-09-02T00:00:00.000Z|1953-09-02T00:00:00.000Z @@ -438,6 +439,7 @@ dateFields from employees | where emp_no == 10049 or emp_no == 10050 | eval year = date_extract(birth_date, "year"), month = date_extract(birth_date, "month_of_year"), day = date_extract(birth_date, "day_of_month") | keep emp_no, year, month, day; +ignoreOrder:true emp_no:integer | year:long | month:long | day:long 10049 | null | null | null @@ -448,8 +450,167 @@ emp_no:integer | year:long | month:long | day:long dateFormatLocale from employees | where emp_no == 10049 or emp_no == 10050 | sort emp_no | eval birth_month = date_format(birth_date, "MMMM") | keep emp_no, birth_date, birth_month; +ignoreOrder:true emp_no:integer | birth_date:datetime | birth_month:keyword 10049 | null | null 10050 | 1958-05-21T00:00:00.000Z | May ; + +datePlusPeriod +row dt = to_dt("2100-01-01T01:01:01.000Z") +| eval plus = dt + 4 years + 3 months + 2 weeks + 1 day; + +dt:datetime |plus:datetime +2100-01-01T01:01:01.000Z |2104-04-16T01:01:01.000Z +; + +datePlusDuration +row dt = to_dt("2100-01-01T00:00:00.000Z") +| eval plus = dt + 1 hour + 1 minute + 1 second + 1 milliseconds; + +dt:datetime |plus:datetime +2100-01-01T00:00:00.000Z |2100-01-01T01:01:01.001Z +; + +dateMinusPeriod +row dt = to_dt("2104-04-16T01:01:01.000Z") +| eval minus = dt - 4 years - 3 months - 2 weeks - 1 day; + +dt:datetime |minus:datetime +2104-04-16T01:01:01.000Z |2100-01-01T01:01:01.000Z +; + +dateMinusDuration +row dt = to_dt("2100-01-01T01:01:01.001Z") +| eval minus = dt - 1 hour - 1 minute - 1 second - 1 milliseconds; + +dt:datetime |minus:datetime +2100-01-01T01:01:01.001Z |2100-01-01T00:00:00.000Z +; + +datePlusPeriodAndDuration +row dt = to_dt("2100-01-01T00:00:00.000Z") +| eval plus = dt + 4 years + 3 months + 2 weeks + 1 day + 1 hour + 1 minute + 1 second + 1 milliseconds; + +dt:datetime |plus:datetime +2100-01-01T00:00:00.000Z |2104-04-16T01:01:01.001Z +; + +dateMinusPeriodAndDuration +row dt = to_dt("2104-04-16T01:01:01.001Z") +| eval minus = dt - 4 years - 3 months - 2 weeks - 1 day - 1 hour - 1 minute - 1 second - 1 milliseconds; + +dt:datetime |minus:datetime +2104-04-16T01:01:01.001Z |2100-01-01T00:00:00.000Z +; + +datePlusPeriodMinusDuration +row dt = to_dt("2100-01-01T01:01:01.001Z") +| eval plus = dt + 4 years + 3 months + 2 weeks + 1 day - 1 hour - 1 minute - 1 second - 1 milliseconds; + +dt:datetime |plus:datetime +2100-01-01T01:01:01.001Z |2104-04-16T00:00:00.000Z +; + +datePlusDurationMinusPeriod +row dt = to_dt("2104-04-16T00:00:00.000Z") +| eval plus = dt - 4 years - 
3 months - 2 weeks - 1 day + 1 hour + 1 minute + 1 second + 1 milliseconds; + +dt:datetime |plus:datetime +2104-04-16T00:00:00.000Z |2100-01-01T01:01:01.001Z +; + +dateMathArithmeticOverflow +row dt = to_dt(9223372036854775807) +| eval plus = dt + 1 day +| keep plus; + +warning:Line 2:15: evaluation of [dt + 1 day] failed, treating result as null. Only first 20 failures recorded. +warning:java.lang.ArithmeticException: long overflow + +plus:datetime +null +; + +dateMathDateException +row dt = to_dt(0) +| eval plus = dt + 2147483647 years +| keep plus; + +warning:Line 2:15: evaluation of [dt + 2147483647 years] failed, treating result as null. Only first 20 failures recorded. +warning:java.time.DateTimeException: Invalid value for Year (valid values -999999999 - 999999999): 2147485617 + +plus:datetime +null +; + +dateMathNegatedPeriod +row dt = to_dt(0) +| eval plus = -(-1 year) + dt +| keep plus; + +plus:datetime +1971-01-01T00:00:00.000Z +; + +dateMathNegatedDuration +row dt = to_dt(0) +| eval plus = -(-1 second) + dt +| keep plus; + +plus:datetime +1970-01-01T00:00:01.000Z +; + + +fieldDateMathSimple +from employees +| sort emp_no +| eval bd = 1 year + birth_date - 1 millisecond +| keep birth_date, bd +| limit 5; + +birth_date:datetime |bd:datetime +1953-09-02T00:00:00.000Z |1954-09-01T23:59:59.999Z +1964-06-02T00:00:00.000Z |1965-06-01T23:59:59.999Z +1959-12-03T00:00:00.000Z |1960-12-02T23:59:59.999Z +1954-05-01T00:00:00.000Z |1955-04-30T23:59:59.999Z +1955-01-21T00:00:00.000Z |1956-01-20T23:59:59.999Z +; + +fieldDateMath +from employees +| eval bd = -1 millisecond + birth_date + 1 year +| eval bd = date_trunc(1 day, bd) +| eval bd = bd + 1 day - 1 year +| where birth_date != bd +| stats c = count(bd); + +c:long +0 +; + +filteringWithDateMath +from employees +| sort emp_no +| where birth_date < to_dt("2023-08-25T11:25:41.052Z") - 70 years +| keep birth_date; + +birth_date:datetime +1953-04-20T00:00:00.000Z +1952-04-19T00:00:00.000Z +1953-01-23T00:00:00.000Z +1952-12-24T00:00:00.000Z +1952-07-08T00:00:00.000Z +1953-04-03T00:00:00.000Z +1953-02-08T00:00:00.000Z +1953-07-28T00:00:00.000Z +1952-08-06T00:00:00.000Z +1952-11-13T00:00:00.000Z +1953-01-07T00:00:00.000Z +1952-05-15T00:00:00.000Z +1952-06-13T00:00:00.000Z +1952-02-27T00:00:00.000Z +1953-04-21T00:00:00.000Z +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec index b02ad46477c88..137820e695892 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/docs.csv-spec @@ -21,6 +21,7 @@ avg_worked_seconds:long | birth_date:date | emp_no:integer | first_name:keyword docsEval // tag::eval[] FROM employees +| SORT emp_no | KEEP first_name, last_name, height | EVAL height_feet = height * 3.281, height_cm = height * 100 // end::eval[] @@ -36,6 +37,7 @@ Georgi |Facello | 2.03 | 6.66043 | 202.99999999999997 docsEvalReplace // tag::evalReplace[] FROM employees +| SORT emp_no | KEEP first_name, last_name, height | EVAL height = height * 3.281 // end::evalReplace[] @@ -51,10 +53,10 @@ Georgi | Facello | 6.66043 docsLimit // tag::limit[] FROM employees +| SORT emp_no ASC | LIMIT 5 // end::limit[] | KEEP emp_no -| SORT emp_no ASC ; emp_no:integer diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/drop.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/drop.csv-spec index 37d438228151e..cd3afa25fc0a6 100644 --- 
a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/drop.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/drop.csv-spec @@ -1,19 +1,19 @@ sortWithLimitOne_DropHeight -from employees | sort languages | limit 1 | drop height*; +from employees | sort languages, emp_no | limit 1 | drop height*; avg_worked_seconds:long | birth_date:date | emp_no:integer | first_name:keyword | gender:keyword | hire_date:date | is_rehired:boolean | job_positions:keyword | languages:integer | languages.byte:integer | languages.long:long | languages.short:integer | last_name:keyword | salary:integer | salary_change:double | salary_change.int:integer |salary_change.keyword:keyword |salary_change.long:long | still_hired:boolean 244294991 |1955-01-21T00:00:00.000Z|10005 |Kyoichi |M |1989-09-12T00:00:00.000Z|[false, false, false, true]|null |1 |1 |1 |1 |Maliniak |63528 |[-2.14, 13.07] |[-2, 13] |[-2.14, 13.07] |[-2, 13] |true ; simpleEvalWithSortAndLimitOne_DropHeight -from employees | eval x = languages + 7 | sort x | limit 1 | drop height*; +from employees | eval x = languages + 7 | sort x, emp_no | limit 1 | drop height*; avg_worked_seconds:long | birth_date:date | emp_no:integer | first_name:keyword | gender:keyword | hire_date:date | is_rehired:boolean | job_positions:keyword | languages:integer | languages.byte:integer | languages.long:long | languages.short:integer | last_name:keyword | salary:integer | salary_change:double | salary_change.int:integer |salary_change.keyword:keyword |salary_change.long:long | still_hired:boolean | x:integer 244294991 |1955-01-21T00:00:00.000Z|10005 |Kyoichi |M |1989-09-12T00:00:00.000Z|[false, false, false, true]|null |1 |1 |1 |1 |Maliniak |63528 |[-2.14, 13.07] |[-2, 13] |[-2.14, 13.07] |[-2, 13] |true |8 ; whereWithEvalGeneratedValue_DropHeight -from employees | eval x = salary / 2 | where x > 37000 | drop height*; +from employees | sort emp_no | eval x = salary / 2 | where x > 37000 | drop height*; avg_worked_seconds:long | birth_date:date | emp_no:integer | first_name:keyword | gender:keyword | hire_date:date | is_rehired:boolean | job_positions:keyword | languages:integer | languages.byte:integer | languages.long:long | languages.short:integer | last_name:keyword | salary:integer | salary_change:double | salary_change.int:integer |salary_change.keyword:keyword |salary_change.long:long | still_hired:boolean | x:integer 393084805 |1957-05-23T00:00:00.000Z|10007 |Tzvetan |F |1989-02-10T00:00:00.000Z|[false, false, true, true]|null |4 |4 |4 |4 |Zielinski |74572 |[-7.06, 0.57, 1.99] |[-7, 0, 1] |[-7.06, 0.57, 1.99] |[-7, 0, 1] |true |37286 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec index 39758a3f21d7f..3ecb31722277c 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/floats.csv-spec @@ -1,7 +1,7 @@ // Floating point types-specific tests inDouble -from employees | keep emp_no, height, height.float, height.half_float, height.scaled_float | where height in (2.03, 2.0299999713897705, 2.029296875, 2.0300000000000002); +from employees | keep emp_no, height, height.float, height.half_float, height.scaled_float | where height in (2.03, 2.0299999713897705, 2.029296875, 2.0300000000000002) | sort emp_no; emp_no:integer |height:double |height.float:double |height.half_float:double |height.scaled_float:double 10001 |2.03 |2.0299999713897705 |2.029296875 
|2.0300000000000002 @@ -9,7 +9,7 @@ emp_no:integer |height:double |height.float:double |height.half_float:double |h ; inFloat -from employees | keep emp_no, height, height.float, height.half_float, height.scaled_float | where height.float in (2.03, 2.0299999713897705, 2.029296875, 2.0300000000000002); +from employees | keep emp_no, height, height.float, height.half_float, height.scaled_float | where height.float in (2.03, 2.0299999713897705, 2.029296875, 2.0300000000000002) | sort emp_no; emp_no:integer |height:double |height.float:double |height.half_float:double |height.scaled_float:double 10001 |2.03 |2.0299999713897705 |2.029296875 |2.0300000000000002 @@ -17,7 +17,7 @@ emp_no:integer |height:double |height.float:double |height.half_float:double |h ; inHalfFloat -from employees | keep emp_no, height, height.float, height.half_float, height.scaled_float | where height.half_float in (2.03, 2.0299999713897705, 2.029296875, 2.0300000000000002); +from employees | keep emp_no, height, height.float, height.half_float, height.scaled_float | where height.half_float in (2.03, 2.0299999713897705, 2.029296875, 2.0300000000000002) | sort emp_no; emp_no:integer |height:double |height.float:double |height.half_float:double |height.scaled_float:double 10001 |2.03 |2.0299999713897705 |2.029296875 |2.0300000000000002 @@ -25,7 +25,7 @@ emp_no:integer |height:double |height.float:double |height.half_float:double |h ; inScaledFloat -from employees | keep emp_no, height, height.float, height.half_float, height.scaled_float | where height.scaled_float in (2.03, 2.0299999713897705, 2.029296875, 2.0300000000000002); +from employees | keep emp_no, height, height.float, height.half_float, height.scaled_float | where height.scaled_float in (2.03, 2.0299999713897705, 2.029296875, 2.0300000000000002) | sort emp_no; emp_no:integer |height:double |height.float:double |height.half_float:double |height.scaled_float:double 10001 |2.03 |2.0299999713897705 |2.029296875 |2.0300000000000002 @@ -212,7 +212,7 @@ autoBucket FROM employees | WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" | EVAL bh = auto_bucket(height, 20, 1.41, 2.10) -| SORT hire_date +| SORT hire_date, height | KEEP hire_date, height, bh ; @@ -225,8 +225,8 @@ hire_date:date | height:double | bh:double 1985-10-14T00:00:00.000Z | 1.77 | 1.75 1985-10-20T00:00:00.000Z | 1.94 | 1.9000000000000001 1985-11-19T00:00:00.000Z | 1.8 | 1.8 -1985-11-20T00:00:00.000Z | 1.99 | 1.9500000000000002 1985-11-20T00:00:00.000Z | 1.93 | 1.9000000000000001 +1985-11-20T00:00:00.000Z | 1.99 | 1.9500000000000002 1985-11-21T00:00:00.000Z | 2.08 | 2.0500000000000003 ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/id.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/id.csv-spec index 9bbdce25f2ab8..238135ef4c53f 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/id.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/id.csv-spec @@ -4,6 +4,7 @@ selectAll FROM apps [metadata _id]; +ignoreOrder:true id:integer |name:keyword |version:version | _id:keyword 1 |aaaaa |1 | 1 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ignore-order.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ignore-order.csv-spec new file mode 100644 index 0000000000000..962314f99bee8 --- /dev/null +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ignore-order.csv-spec @@ -0,0 +1,85 @@ +# just a few tests to verify that ignoreOrder works as expected + +simple +from employees | 
where emp_no < 10004 | keep emp_no, still_hired; +ignoreOrder:true +emp_no:integer | still_hired:boolean +10003 | false +10002 | true +10001 | true +; + +simple2 +from employees | where emp_no < 10004 | keep emp_no, still_hired; +ignoreOrder:true +emp_no:integer | still_hired:boolean +10001 | true +10003 | false +10002 | true +; + + +booleansFirst +from employees | where emp_no < 10004 | keep still_hired, emp_no; +ignoreOrder:true +still_hired:boolean | emp_no:integer +true | 10001 +false | 10003 +true | 10002 +; + + +booleansFirst2 +from employees | where emp_no < 10004 | keep still_hired, emp_no; +ignoreOrder:true +still_hired:boolean | emp_no:integer +true | 10001 +true | 10002 +false | 10003 +; + +nulls +from employees | where emp_no >= 10007 and emp_no < 10012 | keep gender, emp_no; +ignoreOrder:true +gender:keyword | emp_no:integer +F | 10007 +M | 10008 +F | 10009 +null | 10010 +null | 10011 +; + + +nulls2 +from employees | where emp_no >= 10007 and emp_no < 10012 | keep gender, emp_no; +ignoreOrder:true +gender:keyword | emp_no:integer +null | 10010 +F | 10009 +F | 10007 +M | 10008 +null | 10011 +; + +dates +from employees | where emp_no >= 10007 and emp_no < 10012 | keep birth_date, emp_no; +ignoreOrder:true +birth_date:date | emp_no:integer +1957-05-23T00:00:00Z | 10007 +1958-02-19T00:00:00Z | 10008 +1952-04-19T00:00:00Z | 10009 +1963-06-01T00:00:00Z | 10010 +1953-11-07T00:00:00Z | 10011 +; + +dates2 +from employees | where emp_no >= 10007 and emp_no < 10012 | keep birth_date, emp_no; +ignoreOrder:true +birth_date:date | emp_no:integer +1953-11-07T00:00:00Z | 10011 +1957-05-23T00:00:00Z | 10007 +1952-04-19T00:00:00Z | 10009 +1958-02-19T00:00:00Z | 10008 +1963-06-01T00:00:00Z | 10010 +; + diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec index 0c73e24136a0f..daef21b57f1db 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ints.csv-spec @@ -9,6 +9,7 @@ emp_no:integer |avg_worked_seconds:long inShortAndByte from employees | keep emp_no, languages.short, languages.byte | where languages.short in (2, 4, 5) and languages.byte in (4, -1) and emp_no < 10010; +ignoreOrder:true emp_no:integer |languages.short:short|languages.byte:byte 10003 |4 |4 @@ -17,6 +18,7 @@ emp_no:integer |languages.short:short|languages.byte:byte inCast from employees | keep emp_no, languages.byte, avg_worked_seconds, height | where languages.byte in (4, -1, avg_worked_seconds, 1000000000000, null, height) and emp_no < 10010; +ignoreOrder:true emp_no:integer |languages.byte:byte |avg_worked_seconds:long |height:double 10003 |4 |200296405 |1.83 @@ -26,6 +28,7 @@ emp_no:integer |languages.byte:byte |avg_worked_seconds:long |height:double // `<= 10030` insures going over records where is_null(languages)==true; `in (.., emp_no)` prevents pushing the IN to Lucene inOverNulls from employees | keep emp_no, languages | where languages is null or emp_no <= 10030 | where languages in (2, 3, emp_no); +ignoreOrder:true emp_no:integer |languages:integer 10001 |2 @@ -360,7 +363,7 @@ autoBucket FROM employees | WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" | EVAL bs = auto_bucket(salary, 20, 25324, 74999) -| SORT hire_date +| SORT hire_date, salary | KEEP hire_date, salary, bs // end::auto_bucket[] ; @@ -375,8 +378,8 @@ hire_date:date | salary:integer | bs:double 1985-10-14T00:00:00.000Z | 54329 | 50000.0 
1985-10-20T00:00:00.000Z | 48735 | 45000.0 1985-11-19T00:00:00.000Z | 52833 | 50000.0 -1985-11-20T00:00:00.000Z | 74999 | 70000.0 1985-11-20T00:00:00.000Z | 33956 | 30000.0 +1985-11-20T00:00:00.000Z | 74999 | 70000.0 1985-11-21T00:00:00.000Z | 56371 | 55000.0 // end::auto_bucket-result[] ; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec index d69be91cd2f22..32770d06df149 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/ip.csv-spec @@ -1,5 +1,6 @@ simpleProject from hosts | keep card, host, ip0, ip1; +ignoreOrder:true card:keyword |host:keyword |ip0:ip |ip1:ip eth0 |alpha |127.0.0.1 |127.0.0.1 @@ -113,6 +114,7 @@ null |[127.0.0.1, 127.0.0.2, 127.0.0.3] conditional from hosts | eval eq=case(ip0==ip1, ip0, ip1) | keep eq, ip0, ip1; +ignoreOrder:true eq:ip |ip0:ip |ip1:ip 127.0.0.1 |127.0.0.1 |127.0.0.1 @@ -129,6 +131,7 @@ fe80::cae2:65ff:fece:fec1 |[fe80::cae2:65ff:fece:feb in from hosts | eval eq=case(ip0==ip1, ip0, ip1) | where eq in (ip0, ip1) | keep card, host, ip0, ip1, eq; +ignoreOrder:true card:keyword |host:keyword |ip0:ip |ip1:ip |eq:ip eth0 |alpha |127.0.0.1 |127.0.0.1 |127.0.0.1 @@ -150,6 +153,7 @@ eth1 |beta |127.0.0.1 |127.0.0.2 cidrMatchNullField from hosts | where cidr_match(ip0, "127.0.0.2/32") is null | keep card, host, ip0, ip1; +ignoreOrder:true card:keyword |host:keyword |ip0:ip |ip1:ip eth0 |epsilon |[fe80::cae2:65ff:fece:feb9, fe80::cae2:65ff:fece:fec0, fe80::cae2:65ff:fece:fec1]|fe80::cae2:65ff:fece:fec1 @@ -159,6 +163,7 @@ eth2 |epsilon |[fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece cdirMatchMultipleArgs from hosts | where cidr_match(ip1, "127.0.0.2/32", "127.0.0.3/32") | keep card, host, ip0, ip1; +ignoreOrder:true card:keyword |host:keyword |ip0:ip |ip1:ip eth1 |beta |127.0.0.1 |127.0.0.2 @@ -167,6 +172,7 @@ eth0 |gamma |fe80::cae2:65ff:fece:feb9|127.0.0.3 cidrMatchFunctionArg from hosts | where cidr_match(ip1, concat("127.0.0.2", "/32"), "127.0.0.3/32") | keep card, host, ip0, ip1; +ignoreOrder:true card:keyword |host:keyword |ip0:ip |ip1:ip eth1 |beta |127.0.0.1 |127.0.0.2 @@ -175,6 +181,7 @@ eth0 |gamma |fe80::cae2:65ff:fece:feb9|127.0.0.3 cidrMatchFieldArg from hosts | eval cidr="127.0.0.2" | where cidr_match(ip1, cidr, "127.0.0.3/32") | keep card, host, ip0, ip1; +ignoreOrder:true card:keyword |host:keyword |ip0:ip |ip1:ip eth1 |beta |127.0.0.1 |127.0.0.2 @@ -207,6 +214,7 @@ str1:keyword |str2:keyword |ip1:ip |ip2:ip pushDownIP from hosts | where ip1 == to_ip("::1") | keep card, host, ip0, ip1; +ignoreOrder:true card:keyword |host:keyword |ip0:ip |ip1:ip eth1 |alpha |::1 |::1 @@ -215,6 +223,7 @@ eth0 |beta |127.0.0.1 |::1 pushDownIPWithIn from hosts | where ip1 in (to_ip("::1"), to_ip("127.0.0.1")) | keep card, host, ip0, ip1; +ignoreOrder:true card:keyword |host:keyword |ip0:ip |ip1:ip eth0 |alpha |127.0.0.1 |127.0.0.1 @@ -224,6 +233,7 @@ eth0 |beta |127.0.0.1 |::1 pushDownIPWithComparision from hosts | where ip1 > to_ip("127.0.0.1") | keep card, ip1; +ignoreOrder:true card:keyword |ip1:ip eth1 |127.0.0.2 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec index d910cb3a957f7..3637081c3c4b6 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/keep.csv-spec @@ -1,5 +1,5 @@ projectFrom -from 
employees | keep languages, emp_no, first_name, last_name | limit 10; +from employees | sort emp_no | keep languages, emp_no, first_name, last_name | limit 10; languages:integer | emp_no:integer | first_name:keyword | last_name:keyword 2 | 10001 | Georgi | Facello @@ -15,7 +15,8 @@ languages:integer | emp_no:integer | first_name:keyword | last_name:keyword ; projectFromWithFilter -from employees | keep languages, emp_no, first_name, last_name | eval x = emp_no + 10 | where x > 10040 and x < 10050 | limit 5; +from employees | keep languages, emp_no, first_name, last_name | eval x = emp_no + 10 | where x > 10040 and x < 10046; +ignoreOrder:true languages:integer | emp_no:integer | first_name:keyword | last_name:keyword | x:integer 4 | 10031 | null | Joslin | 10041 @@ -41,6 +42,7 @@ c : long averageByField from employees | stats avg(avg_worked_seconds) by languages; +ignoreOrder:true avg(avg_worked_seconds):double | languages:integer 3.181719481E8 | null @@ -60,6 +62,7 @@ avg(avg_worked_seconds):double | languages.long:long statsBySubField from employees | stats avg=avg(avg_worked_seconds),min=min(avg_worked_seconds),max=max(avg_worked_seconds) by languages.long; +ignoreOrder:true avg:double | min:long | max:long | languages.long:long 3.181719481E8 | 226435054 | 374037782 | null @@ -137,6 +140,7 @@ avg(salary):double | last_name:keyword statsOfInteger from employees | where starts_with(last_name, "L") | stats a=avg(salary), s=sum(salary), c=count(last_name) by last_name; +ignoreOrder:true a:double | s:long | c:long |last_name:keyword 42520.0 |85040 |2 |Lortz @@ -178,6 +182,7 @@ med:double | languages:integer multiConditionalWhere from employees | eval abc = 1+2 | where (abc + emp_no > 10100 or languages == 1) or (abc + emp_no < 10005 and gender == "F") | keep emp_no, languages, gender, first_name, abc; +ignoreOrder:true emp_no:integer | languages:integer | gender:keyword | first_name:keyword | abc:integer 10005 | 1 | M | Kyoichi | 3 @@ -201,7 +206,7 @@ emp_no:integer | languages:integer | gender:keyword | first_name:keyword | abc:i ; projectFromWithStatsAfterLimit -from employees | keep gender, avg_worked_seconds, first_name, last_name | limit 10 | stats m = max(avg_worked_seconds) by gender; +from employees | sort emp_no | keep gender, avg_worked_seconds, first_name, last_name | limit 10 | stats m = max(avg_worked_seconds) by gender; m:long | gender:keyword 315236372 | null @@ -232,7 +237,7 @@ emp_no:integer | languages:integer | first_name:keyword | last_name:keyword ; sortWithLimitOne -from employees | sort languages | limit 1; +from employees | sort languages, emp_no | limit 1; avg_worked_seconds:long | birth_date:date | emp_no:integer | first_name:keyword | gender:keyword | height:double | height.float:double | height.half_float:double | height.scaled_float:double | hire_date:date | is_rehired:boolean | job_positions:keyword | languages:integer | languages.byte:integer | languages.long:long | languages.short:integer | last_name:keyword | salary:integer | salary_change:double | salary_change.int:integer |salary_change.keyword:keyword |salary_change.long:long | still_hired:boolean 244294991 |1955-01-21T00:00:00.000Z|10005 |Kyoichi |M |2.05 |2.049999952316284|2.05078125 |2.05 |1989-09-12T00:00:00.000Z|[false, false, false, true]|null |1 |1 |1 |1 |Maliniak |63528 |[-2.14, 13.07] |[-2, 13] |[-2.14, 13.07] |[-2, 13] |true @@ -261,10 +266,10 @@ height:double | languages.long:long | still_hired:boolean ; simpleEvalWithSortAndLimitOne -from employees | eval x = languages + 7 | sort x | limit 1; 
+from employees | eval x = languages + 7 | sort x, avg_worked_seconds | limit 1; avg_worked_seconds:long | birth_date:date | emp_no:integer | first_name:keyword | gender:keyword | height:double | height.float:double | height.half_float:double | height.scaled_float:double | hire_date:date | is_rehired:boolean | job_positions:keyword | languages:integer | languages.byte:integer | languages.long:long | languages.short:integer | last_name:keyword | salary:integer | salary_change:double | salary_change.int:integer | salary_change.keyword:keyword |salary_change.long:long | still_hired:boolean | x:integer -244294991 |1955-01-21T00:00:00.000Z|10005 |Kyoichi |M |2.05 |2.049999952316284|2.05078125 |2.05 |1989-09-12T00:00:00.000Z|[false, false, false, true]|null |1 |1 |1 |1 |Maliniak |63528 |[-2.14, 13.07] |[-2, 13] |[-2.14, 13.07] |[-2, 13] |true |8 +208374744 |1956-11-14T00:00:00.000Z|10033 |null |M |1.63 |1.6299999952316284|1.6298828125 |1.6300000000000001 |1987-03-18T00:00:00.000Z|true |null |1 |1 |1 |1 |Merlo |70011 |null |null |null |null |false |8 ; evalOfAverageValue @@ -283,6 +288,7 @@ avg(ratio):double simpleWhere from employees | where salary > 70000 | keep first_name, last_name, salary; +ignoreOrder:true first_name:keyword | last_name:keyword | salary:integer Tzvetan | Zielinski | 74572 @@ -297,6 +303,7 @@ Valter | Sullins | 73578 whereAfterProject from employees | keep salary | where salary > 70000; +ignoreOrder:true salary:integer 74572 @@ -313,6 +320,7 @@ whereWithEvalGeneratedValue // the result from running on ES is the one with many decimals the test that runs locally is the one rounded to 2 decimals // the "height" fields have the values as 1.7, 1.7000000476837158, 1.7001953125, 1.7 from employees | eval x = salary / 2 | where x > 37000; +ignoreOrder:true avg_worked_seconds:long | birth_date:date | emp_no:integer | first_name:keyword | gender:keyword | height:double | height.float:double | height.half_float:double | height.scaled_float:double | hire_date:date | is_rehired:boolean | job_positions:keyword | languages:integer | languages.byte:integer | languages.long:long | languages.short:integer | last_name:keyword | salary:integer | salary_change:double | salary_change.int:integer |salary_change.keyword:keyword |salary_change.long:long | still_hired:boolean | x:integer 393084805 |1957-05-23T00:00:00.000Z|10007 |Tzvetan |F |1.7 |1.7000000476837158|1.7001953125 |1.7 |1989-02-10T00:00:00.000Z|[false, false, true, true]|null |4 |4 |4 |4 |Zielinski |74572 |[-7.06, 0.57, 1.99] |[-7, 0, 1] |[-7.06, 0.57, 1.99] |[-7, 0, 1] |true |37286 @@ -329,6 +337,7 @@ x:double statsByDouble from employees | eval abc=1+2 | where abc + languages > 4 | stats count(height) by height; +ignoreOrder:true count(height):long | height:double 2 | 2.03 @@ -391,7 +400,7 @@ count(height):long | h1:double whereNegatedCondition -from employees | eval abc=1+2 | where abc + languages > 4 and languages.long != 1 | eval x=abc+languages | keep x, languages, languages.long | limit 3; +from employees | sort emp_no | eval abc=1+2 | where abc + languages > 4 and languages.long != 1 | eval x=abc+languages | keep x, languages, languages.long | limit 3; x:integer | languages:integer | languages.long:long 5 | 2 | 2 @@ -400,7 +409,7 @@ x:integer | languages:integer | languages.long:long ; evalOverride -from employees | eval languages = languages + 1 | eval languages = languages + 1 | limit 5 | keep l*; +from employees | sort emp_no | eval languages = languages + 1 | eval languages = languages + 1 | limit 5 | keep l*; 
languages.byte:integer | languages.long:long | languages.short:integer | last_name:keyword | languages:integer 2 | 2 | 2 | Facello | 4 @@ -425,15 +434,15 @@ avg(nullsum):double | count(nullsum):long ; fromStatsLimit -from employees | stats ac = avg(salary) by languages | limit 2; +from employees | stats ac = avg(salary) by languages | sort ac | limit 2; ac:double | languages:integer -52519.6 | null -48178.84210526316 | 2 +41680.76190476191 | 5 +47733.0 | 4 ; fromLimit -from employees | keep first_name | limit 2; +from employees | sort emp_no | keep first_name | limit 2; first_name:keyword Georgi @@ -470,6 +479,7 @@ x:integer filterKeyword from employees | where first_name != "abc" and emp_no < 10010 | keep first_name; +ignoreOrder:true first_name:keyword Georgi @@ -484,7 +494,7 @@ Sumant ; projectMultiValueKeywords -from employees | keep emp_no, job_positions, still_hired | limit 5; +from employees | sort emp_no | keep emp_no, job_positions, still_hired | limit 5; emp_no:integer | job_positions:keyword |still_hired:boolean 10001 |[Accountant, Senior Python Developer] |true @@ -495,7 +505,7 @@ emp_no:integer | job_positions:keyword ; projectMultiValueBooleans -from employees | keep emp_no, is_rehired, still_hired | limit 5; +from employees | sort emp_no | keep emp_no, is_rehired, still_hired | limit 5; emp_no:integer | is_rehired:boolean |still_hired:boolean 10001 |[false, true] |true @@ -506,7 +516,7 @@ emp_no:integer | is_rehired:boolean |still_hired:boolean ; projectMultiValueNumbers -from employees | keep emp_no, salary_change, salary_change.int, salary_change.long | limit 10; +from employees | sort emp_no | keep emp_no, salary_change, salary_change.int, salary_change.long | limit 10; emp_no:integer | salary_change:double |salary_change.int:integer|salary_change.long:long 10001 |1.19 |1 |1 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-basic.json b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-basic.json index 7edd242c50a7c..b650cb7e64564 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-basic.json +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/mapping-basic.json @@ -20,6 +20,14 @@ }, "_meta_field": { "type" : "keyword" + }, + "job": { + "type": "text", + "fields": { + "raw": { + "type": "keyword" + } + } } } } diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/metadata-ignoreCsvTests.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/metadata-ignoreCsvTests.csv-spec index 34935384786f1..d89f3337c081b 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/metadata-ignoreCsvTests.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/metadata-ignoreCsvTests.csv-spec @@ -24,10 +24,16 @@ emp_no:integer ; metaIndexInAggs -from employees [metadata _index] | stats max = max(emp_no) by _index; +// tag::metaIndexInAggs[] +FROM employees [METADATA _index, _id] +| STATS max = MAX(emp_no) BY _index +// end::metaIndexInAggs[] +; +// tag::metaIndexInAggs-result[] max:integer |_index:keyword 10100 |employees +// end::metaIndexInAggs-result[] ; metaIndexAliasedInAggs @@ -96,10 +102,30 @@ min:integer |i:double ; overwritten -from employees [metadata _index, _version] | eval _index = 3, _version = "version" | keep emp_no, _index, _version | limit 3; +from employees [metadata _index, _version] | sort emp_no | eval _index = 3, _version = "version" | keep emp_no, _index, _version | limit 3; emp_no:integer |_index:integer |_version:keyword 10001 |3 |version 10002 |3 
|version 10003 |3 |version ; + +multipleIndices +// tag::multipleIndices[] +FROM ul_logs, apps [METADATA _index, _version] +| WHERE id IN (13, 14) AND _version == 1 +| EVAL key = CONCAT(_index, "_", TO_STR(id)) +| SORT id, _index +| KEEP id, _index, _version, key +// end::multipleIndices[] +; + +// tag::multipleIndices-result[] + id:long | _index:keyword | _version:long | key:keyword +13 |apps |1 |apps_13 +13 |ul_logs |1 |ul_logs_13 +14 |apps |1 |apps_14 +14 |ul_logs |1 |ul_logs_14 + +// end::multipleIndices-result[] +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec index f99788eb7d708..799f8821d97da 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/rename.csv-spec @@ -76,7 +76,7 @@ d:integer | c:integer ; renameEvalProject -from employees | rename languages as x | keep x | eval z = 2 * x | keep x, z | limit 3; +from employees | sort emp_no | rename languages as x | keep x | eval z = 2 * x | keep x, z | limit 3; x:integer | z:integer 2 | 4 @@ -85,7 +85,7 @@ x:integer | z:integer ; renameProjectEval -from employees | eval y = languages | rename languages as x | keep x, y | eval x2 = x + 1 | eval y2 = y + 2 | limit 3; +from employees | sort emp_no | eval y = languages | rename languages as x | keep x, y | eval x2 = x + 1 | eval y2 = y + 2 | limit 3; x:integer | y:integer | x2:integer | y2:integer 2 | 2 | 3 | 4 @@ -94,7 +94,7 @@ x:integer | y:integer | x2:integer | y2:integer ; renameWithFilterPushedToES -from employees | rename emp_no as x | keep languages, first_name, last_name, x | where x > 10030 and x < 10040 | limit 5; +from employees | sort emp_no | rename emp_no as x | keep languages, first_name, last_name, x | where x > 10030 and x < 10040 | limit 5; languages:integer | first_name:keyword | last_name:keyword | x:integer 4 | null | Joslin | 10031 @@ -105,7 +105,7 @@ languages:integer | first_name:keyword | last_name:keyword | x:integer ; renameNopProject -from employees | rename emp_no as emp_no | keep emp_no, last_name | limit 3; +from employees | sort emp_no | rename emp_no as emp_no | keep emp_no, last_name | limit 3; emp_no:integer | last_name:keyword 10001 | Facello @@ -114,7 +114,7 @@ emp_no:integer | last_name:keyword ; renameOverride -from employees | rename emp_no as languages | keep languages, last_name | limit 3; +from employees | sort emp_no | rename emp_no as languages | keep languages, last_name | limit 3; languages:integer | last_name:keyword 10001 | Facello diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec index e114b00d4b9fa..efd1e3591e2b8 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/show.csv-spec @@ -36,6 +36,7 @@ is_finite |is_finite(arg1) is_infinite |is_infinite(arg1) is_nan |is_nan(arg1) least |least(first, rest...) 
+left |left(string, length) length |length(arg1) log10 |log10(n) ltrim |ltrim(arg1) @@ -55,6 +56,7 @@ now |now() percentile |percentile(arg1, arg2) pi |pi() pow |pow(base, exponent) +right |right(string, length) round |round(arg1, arg2) rtrim |rtrim(arg1) sin |sin(n) diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec index 4fcb4f257d273..db1800fabeed7 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/stats.csv-spec @@ -324,53 +324,53 @@ c:long | gender:keyword | hire_year_str:keyword ; byLongAndLong -from employees | eval trunk_worked_seconds = avg_worked_seconds / 100000000 * 100000000 | stats c = count(languages.long) by languages.long, trunk_worked_seconds | sort c desc; +from employees | eval trunk_worked_seconds = avg_worked_seconds / 100000000 * 100000000 | stats c = count(languages.long) by languages.long, trunk_worked_seconds | sort c desc, languages.long, trunk_worked_seconds; -c:long | languages.long:long | trunk_worked_seconds:long -15 | 5 | 300000000 -11 | 2 | 300000000 -10 | 4 | 300000000 - 9 | 3 | 200000000 - 8 | 2 | 200000000 - 8 | 4 | 200000000 - 8 | 3 | 300000000 - 8 | 1 | 200000000 - 7 | 1 | 300000000 - 6 | 5 | 200000000 +c:long | languages.long:long | trunk_worked_seconds:long +15 |5 |300000000 +11 |2 |300000000 +10 |4 |300000000 +9 |3 |200000000 +8 |1 |200000000 +8 |2 |200000000 +8 |3 |300000000 +8 |4 |200000000 +7 |1 |300000000 +6 |5 |200000000 ; byUnmentionedLongAndLong -from employees | eval trunk_worked_seconds = avg_worked_seconds / 100000000 * 100000000 | stats c = count(gender) by languages.long, trunk_worked_seconds | sort c desc; +from employees | eval trunk_worked_seconds = avg_worked_seconds / 100000000 * 100000000 | stats c = count(gender) by languages.long, trunk_worked_seconds | sort c desc, trunk_worked_seconds; c:long | languages.long:long | trunk_worked_seconds:long -13 | 5 | 300000000 -10 | 2 | 300000000 - 9 | 4 | 300000000 - 9 | 3 | 200000000 - 8 | 4 | 200000000 - 8 | 3 | 300000000 - 7 | 1 | 200000000 - 6 | 2 | 200000000 - 6 | 1 | 300000000 - 4 | 5 | 200000000 +13 |5 |300000000 +10 |2 |300000000 +9 |3 |200000000 +9 |4 |300000000 +8 |4 |200000000 +8 |3 |300000000 +7 |1 |200000000 +6 |2 |200000000 +6 |1 |300000000 +4 |5 |200000000 ; byUnmentionedIntAndLong -from employees | eval trunk_worked_seconds = avg_worked_seconds / 100000000 * 100000000 | stats c = count(gender) by languages, trunk_worked_seconds | sort c desc; +from employees | eval trunk_worked_seconds = avg_worked_seconds / 100000000 * 100000000 | stats c = count(gender) by languages, trunk_worked_seconds | sort c desc, languages, trunk_worked_seconds; c:long | languages:integer | trunk_worked_seconds:long - 13 | 5 | 300000000 - 10 | 2 | 300000000 - 9 | 4 | 300000000 - 9 | 3 | 200000000 - 8 | 4 | 200000000 - 8 | 3 | 300000000 - 7 | 1 | 200000000 - 6 | 2 | 200000000 - 6 | null | 300000000 - 6 | 1 | 300000000 - 4 | null | 200000000 - 4 | 5 | 200000000 +13 |5 |300000000 +10 |2 |300000000 +9 |3 |200000000 +9 |4 |300000000 +8 |3 |300000000 +8 |4 |200000000 +7 |1 |200000000 +6 |1 |300000000 +6 |2 |200000000 +6 |null |300000000 +4 |5 |200000000 +4 |null |200000000 ; byUnmentionedIntAndBoolean @@ -392,7 +392,7 @@ c:long | languages:integer | still_hired:boolean ; byDateAndKeywordAndInt -from employees | eval d = date_trunc(1 year, hire_date) | stats c = count(emp_no) by d, gender, languages | sort c desc, d, 
languages desc | limit 10; +from employees | eval d = date_trunc(1 year, hire_date) | stats c = count(emp_no) by d, gender, languages | sort c desc, d, languages desc, gender desc | limit 10; c:long | d:date | gender:keyword | languages:integer 3 | 1986-01-01T00:00:00.000Z | M | 2 @@ -408,7 +408,7 @@ c:long | d:date | gender:keyword | languages:integer ; byDateAndKeywordAndIntWithAlias -from employees | eval d = date_trunc(1 year, hire_date) | rename gender as g, languages as l, emp_no as e | keep d, g, l, e | stats c = count(e) by d, g, l | sort c desc, d, l desc | limit 10; +from employees | eval d = date_trunc(1 year, hire_date) | rename gender as g, languages as l, emp_no as e | keep d, g, l, e | stats c = count(e) by d, g, l | sort c desc, d, l desc, g desc | limit 10; c:long | d:date | g:keyword | l:integer 3 | 1986-01-01T00:00:00.000Z | M | 2 @@ -474,8 +474,7 @@ min(salary):i | max(salary):i | c:l ; statsWithLiterals -from employees | limit 10 | eval x = 1 | stats c = count(x) -; +from employees | limit 10 | eval x = 1 | stats c = count(x); c:l 10 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec index 51bd57d9dc1da..aa893e63e1a30 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/string.csv-spec @@ -55,6 +55,7 @@ emp_no:integer | first_name:keyword | f_S:boolean startsWithField from employees | where emp_no <= 10010 | eval f_l = starts_with(last_name, gender) | keep emp_no, last_name, gender, f_l; +ignoreOrder:true emp_no:integer | last_name:keyword | gender:keyword | f_l:boolean 10001 | Facello | M | false @@ -71,6 +72,7 @@ emp_no:integer | last_name:keyword | gender:keyword | f_l:boolean substring from employees | where emp_no <= 10010 | eval f_l = substring(last_name, 3) | keep emp_no, last_name, f_l; +ignoreOrder:true emp_no:integer | last_name:keyword | f_l:keyword 10001 | Facello | cello @@ -87,6 +89,7 @@ emp_no:integer | last_name:keyword | f_l:keyword substring with length from employees | where emp_no <= 10010 | eval f_l = substring(last_name, 3, 1) | keep emp_no, last_name, f_l; +ignoreOrder:true emp_no:integer | last_name:keyword | f_l:keyword 10001 | Facello | c @@ -103,6 +106,7 @@ emp_no:integer | last_name:keyword | f_l:keyword substring negative start from employees | where emp_no <= 10010 | eval f_l = substring(last_name, -3) | keep emp_no, last_name, f_l; +ignoreOrder:true emp_no:integer | last_name:keyword | f_l:keyword 10001 | Facello | llo @@ -119,6 +123,7 @@ emp_no:integer | last_name:keyword | f_l:keyword substring nested negative start from employees | where emp_no <= 10010 | eval f_l = substring(substring(last_name, -3),-1) | keep emp_no, last_name, f_l; +ignoreOrder:true emp_no:integer | last_name:keyword | f_l:keyword 10001 | Facello | o @@ -135,6 +140,7 @@ emp_no:integer | last_name:keyword | f_l:keyword substring length from employees | where emp_no <= 10010 | eval f_l = length(substring(last_name, 3)) | keep emp_no, last_name, f_l; +ignoreOrder:true emp_no:integer | last_name:keyword | f_l:integer 10001 | Facello | 5 @@ -151,6 +157,7 @@ emp_no:integer | last_name:keyword | f_l:integer substring pair from employees | where emp_no <= 10010 | eval x = substring(last_name, 1, 1), y = 1, z = substring("abcdef", y, y) | keep emp_no, last_name, x, z; +ignoreOrder:true emp_no:integer | last_name:keyword | x:keyword | z:keyword 10001 | Facello | F | a @@ -294,6 +301,7 
@@ emp_no:integer | name:keyword // Note: no matches in MV returned in from employees | where job_positions in ("Internship", first_name) | keep emp_no, job_positions; +ignoreOrder:true emp_no:integer |job_positions:keyword 10048 |Internship @@ -302,7 +310,7 @@ emp_no:integer |job_positions:keyword in3VLNoNull // filtering for SVs, since IN uses EQUALS evaluators, that turn MVs into NULL -from employees | where mv_count(job_positions) <= 1 | where emp_no >= 10024 | limit 3 | keep emp_no, job_positions | eval is_in = job_positions in ("Accountant", "Internship"); +from employees | sort emp_no | where mv_count(job_positions) <= 1 | where emp_no >= 10024 | limit 3 | keep emp_no, job_positions | eval is_in = job_positions in ("Accountant", "Internship"); emp_no:integer |job_positions:keyword |is_in:boolean 10024 |Junior Developer |false @@ -311,7 +319,7 @@ emp_no:integer |job_positions:keyword |is_in:boolean ; in3VLWithNull -from employees | where mv_count(job_positions) <= 1 | where emp_no >= 10024 | limit 3 | keep emp_no, job_positions | eval is_in = job_positions in ("Accountant", "Internship", null); +from employees | sort emp_no | where mv_count(job_positions) <= 1 | where emp_no >= 10024 | limit 3 | keep emp_no, job_positions | eval is_in = job_positions in ("Accountant", "Internship", null); emp_no:integer |job_positions:keyword |is_in:boolean 10024 |Junior Developer |null @@ -320,7 +328,7 @@ emp_no:integer |job_positions:keyword |is_in:boolean ; in3VLWithComputedNull -from employees | where mv_count(job_positions) <= 1 | where emp_no >= 10024 | limit 3 | keep emp_no, job_positions | eval nil = concat("", null) | eval is_in = job_positions in ("Accountant", "Internship", nil); +from employees | sort emp_no | where mv_count(job_positions) <= 1 | where emp_no >= 10024 | limit 3 | keep emp_no, job_positions | eval nil = concat("", null) | eval is_in = job_positions in ("Accountant", "Internship", nil); emp_no:integer |job_positions:keyword |nil:keyword |is_in:boolean 10024 |Junior Developer |null |null @@ -329,7 +337,7 @@ emp_no:integer |job_positions:keyword |nil:keyword |is_in:boolean ; in3VLWithNullAsValue -from employees | where mv_count(job_positions) <= 1 | where emp_no >= 10024 | limit 3 | keep emp_no, job_positions | eval is_in = null in ("Accountant", "Internship", null); +from employees | sort emp_no | where mv_count(job_positions) <= 1 | where emp_no >= 10024 | limit 3 | keep emp_no, job_positions | eval is_in = null in ("Accountant", "Internship", null); emp_no:integer |job_positions:keyword |is_in:boolean 10024 |Junior Developer |null @@ -338,7 +346,7 @@ emp_no:integer |job_positions:keyword |is_in:boolean ; in3VLWithComputedNullAsValue -from employees | where mv_count(job_positions) <= 1 | where emp_no >= 10024 | limit 3 | keep emp_no, job_positions | eval nil = concat("", null) | eval is_in = nil in ("Accountant", "Internship", null); +from employees | sort emp_no | where mv_count(job_positions) <= 1 | where emp_no >= 10024 | limit 3 | keep emp_no, job_positions | eval nil = concat("", null) | eval is_in = nil in ("Accountant", "Internship", null); emp_no:integer |job_positions:keyword |nil:keyword |is_in:boolean 10024 |Junior Developer |null |null @@ -436,7 +444,7 @@ min(salary):integer | max(salary):integer | job_positions:keyword ; convertFromString -from employees | eval positions = to_string(job_positions) | keep emp_no, positions, job_positions | limit 5; +from employees | sort emp_no | eval positions = to_string(job_positions) | keep emp_no, positions, job_positions | 
limit 5; emp_no:integer |positions:keyword |job_positions:keyword 10001 |[Accountant, Senior Python Developer] |[Accountant, Senior Python Developer] @@ -548,7 +556,7 @@ emp_no:integer |job_positions:keyword ; convertFromBoolean -from employees | eval rehired = to_string(is_rehired) | keep emp_no, rehired, is_rehired | limit 5; +from employees | sort emp_no | eval rehired = to_string(is_rehired) | keep emp_no, rehired, is_rehired | limit 5; emp_no:integer |rehired:string |is_rehired:boolean 10001 |[false, true] |[false, true] @@ -559,7 +567,7 @@ emp_no:integer |rehired:string |is_rehired:boolean ; convertFromDatetime -from employees | sort emp_no| eval hired_at = to_string(hire_date) | keep emp_no, hired_at, hire_date | limit 1; +from employees | sort emp_no | eval hired_at = to_string(hire_date) | keep emp_no, hired_at, hire_date | limit 1; emp_no:integer |hired_at:keyword |hire_date:date 10001 |1986-06-26T00:00:00.000Z |1986-06-26T00:00:00.000Z @@ -567,6 +575,7 @@ emp_no:integer |hired_at:keyword |hire_date:date convertFromIP from hosts | where host=="epsilon" | eval str0 = to_string(ip0) | keep str0, ip0; +ignoreOrder:true str0:keyword |ip0:ip ["fe80::cae2:65ff:fece:feb9", "fe80::cae2:65ff:fece:fec0", "fe80::cae2:65ff:fece:fec1"] |[fe80::cae2:65ff:fece:feb9, fe80::cae2:65ff:fece:fec0, fe80::cae2:65ff:fece:fec1] @@ -638,6 +647,7 @@ ROW a=[10, 9, 8] showTextFields from hosts | where host == "beta" | keep host, host_group, description; +ignoreOrder:true host:keyword | host_group:text | description:text beta | Kubernetes cluster | beta k8s server @@ -647,6 +657,7 @@ beta | Kubernetes cluster | [beta k8s server, beta k8s server2 lengthOfText from hosts | where host=="epsilon" | eval l1 = length(host_group), l2 = length(description) | keep l1, l2; +ignoreOrder:true l1:integer | l2:integer null | 19 @@ -656,6 +667,7 @@ null | 19 startsWithText from hosts | where host=="epsilon" | eval l1 = starts_with(host_group, host), l2 = starts_with(description, host) | keep l1, l2; +ignoreOrder:true l1:boolean | l2:boolean null | true @@ -665,6 +677,7 @@ false | null substringOfText from hosts | where host=="epsilon" | eval l1 = substring(host_group, 0, 5), l2 = substring(description, 0, 5) | keep l1, l2; +ignoreOrder:true l1:keyword | l2:keyword null | epsil @@ -700,3 +713,43 @@ Gateway | instances Gateway | instances null | null ; + +left +// tag::left[] +FROM employees +| KEEP last_name +| EVAL left = LEFT(last_name, 3) +| SORT last_name ASC +| LIMIT 5 +// end::left[] +; + +// tag::left-result[] +last_name:keyword | left:keyword +Awdeh |Awd +Azuma |Azu +Baek |Bae +Bamford |Bam +Bernatsky |Ber +// end::left-result[] +; + +right +// tag::right[] +FROM employees +| KEEP last_name +| EVAL right = RIGHT(last_name, 3) +| SORT last_name ASC +| LIMIT 5 +// end::right[] +; + +// tag::right-result[] +last_name:keyword | right:keyword +Awdeh |deh +Azuma |uma +Baek |aek +Bamford |ord +Bernatsky |sky +// end::right-result[] +; diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/topN.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/topN.csv-spec index c8d209f932ad6..e09bc933340d1 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/topN.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/topN.csv-spec @@ -42,29 +42,29 @@ epsilon |eth2 |[fe81::cae2:65ff:fece:feb9, fe82::cae2:65ff:fece ; complexMultiSortingFields_SameFieldAscAndDesc -from employees | sort job_positions asc, languages nulls first, gender desc, job_positions desc | limit 20 | keep 
job_positions, languages, gender; +from employees | sort job_positions asc, languages nulls first, gender desc, job_positions desc, emp_no | limit 20 | keep job_positions, languages, gender, emp_no; - job_positions:keyword |languages:integer|gender:keyword -[Accountant, Internship, Python Developer, Tech Lead] |null |M -Accountant |null |M -[Accountant, Purchase Manager, Support Engineer] |null |F -[Accountant, Business Analyst, Data Scientist, Python Developer] |1 |M -[Accountant, Principal Support Engineer, Support Engineer, Tech Lead] |1 |F -[Accountant, Junior Developer] |1 |F -[Accountant, Purchase Manager, Python Developer, Reporting Analyst] |2 |null -[Accountant, Senior Python Developer, Tech Lead] |2 |M -[Accountant, Junior Developer, Reporting Analyst, Support Engineer] |2 |M -[Accountant, Senior Python Developer] |2 |M -[Accountant, Internship] |2 |M -[Accountant, Junior Developer, Principal Support Engineer, Purchase Manager]|3 |M -[Accountant, Business Analyst, Purchase Manager] |3 |M -[Accountant, Junior Developer] |4 |F -[Accountant, Head Human Resources] |5 |null -[Accountant, Business Analyst, Senior Python Developer, Tech Lead] |5 |M -[Accountant, Data Scientist, Internship, Senior Python Developer] |5 |M -[Accountant, Principal Support Engineer, Senior Python Developer] |5 |F -[Architect, Principal Support Engineer, Purchase Manager, Senior Team Lead] |2 |F -[Architect, Internship, Principal Support Engineer] |2 |F + job_positions:keyword |languages:integer|gender:keyword | emp_no:integer +[Accountant, Internship, Python Developer, Tech Lead] |null |M |10028 +Accountant |null |M |10025 +[Accountant, Purchase Manager, Support Engineer] |null |F |10023 +[Accountant, Business Analyst, Data Scientist, Python Developer] |1 |M |10034 +[Accountant, Principal Support Engineer, Support Engineer, Tech Lead] |1 |F |10044 +[Accountant, Junior Developer] |1 |F |10092 +[Accountant, Purchase Manager, Python Developer, Reporting Analyst] |2 |null |10016 +[Accountant, Senior Python Developer, Tech Lead] |2 |M |10037 +[Accountant, Junior Developer, Reporting Analyst, Support Engineer] |2 |M |10050 +[Accountant, Senior Python Developer] |2 |M |10001 +[Accountant, Internship] |2 |M |10081 +[Accountant, Junior Developer, Principal Support Engineer, Purchase Manager]|3 |M |10045 +[Accountant, Business Analyst, Purchase Manager] |3 |M |10051 +[Accountant, Junior Developer] |4 |F |10089 +[Accountant, Head Human Resources] |5 |null |10012 +[Accountant, Business Analyst, Senior Python Developer, Tech Lead] |5 |M |10085 +[Accountant, Data Scientist, Internship, Senior Python Developer] |5 |M |10066 +[Accountant, Principal Support Engineer, Senior Python Developer] |5 |F |10094 +[Architect, Principal Support Engineer, Purchase Manager, Senior Team Lead] |2 |F |10059 +[Architect, Internship, Principal Support Engineer] |2 |F |10078 ; sortingOnMVDoubles diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec index 2566fc5845f86..9f5d7be3e63e0 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/unsigned_long.csv-spec @@ -91,7 +91,7 @@ from ul_logs | where bytes_in == bytes_out; ; filterOnFieldsInequality -from ul_logs | where bytes_in < bytes_out | eval b_in = bytes_in / to_ul(pow(10.,15)), b_out = bytes_out / to_ul(pow(10.,15)) | limit 5; +from ul_logs | sort id | where bytes_in < bytes_out | 
eval b_in = bytes_in / to_ul(pow(10.,15)), b_out = bytes_out / to_ul(pow(10.,15)) | limit 5; @timestamp:date | bytes_in:ul | bytes_out:ul | id:i | status:k | b_in:ul | b_out:ul 2017-11-10T21:15:54.000Z|4348801185987554667 |12749081495402663265|1 |OK |4348 |12749 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/version.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/version.csv-spec index b7b3ca1c99d3c..df1fa6e67f279 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/version.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/version.csv-spec @@ -6,6 +6,7 @@ selectAll FROM apps; +ignoreOrder:true id:integer |name:keyword |version:version 1 |aaaaa |1 diff --git a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/where-like.csv-spec b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/where-like.csv-spec index 36e92c723354b..c513f6670b044 100644 --- a/x-pack/plugin/esql/qa/testFixtures/src/main/resources/where-like.csv-spec +++ b/x-pack/plugin/esql/qa/testFixtures/src/main/resources/where-like.csv-spec @@ -43,6 +43,7 @@ emp_no:integer | first_name:keyword | last_name:keyword likeAndOr from employees | where first_name like "Eberhar*" or first_name like "*zuh*" and last_name like "*eha" | keep emp_no, first_name, last_name; +ignoreOrder:true emp_no:integer | first_name:keyword | last_name:keyword 10013 | Eberhardt | Terkki @@ -180,6 +181,7 @@ emp_no:integer | first_name:keyword | last_name:keyword rLikeAndOr from employees | where first_name rlike "Eberhar.*" or first_name rlike ".*zuh.*" and last_name rlike ".*eha" | keep emp_no, first_name, last_name; +ignoreOrder:true emp_no:integer | first_name:keyword | last_name:keyword 10013 | Eberhardt | Terkki diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java index 12be88a28a2f7..c567edda97018 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionIT.java @@ -51,6 +51,7 @@ import static java.util.Comparator.naturalOrder; import static java.util.Comparator.reverseOrder; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anyOf; import static org.hamcrest.Matchers.contains; @@ -77,21 +78,21 @@ public void setupIndex() { public void testProjectConstant() { EsqlQueryResponse results = run("from test | eval x = 1 | keep x"); assertThat(results.columns(), equalTo(List.of(new ColumnInfo("x", "integer")))); - assertThat(results.values().size(), equalTo(40)); - assertThat(results.values().get(0).get(0), equalTo(1)); + assertThat(getValuesList(results).size(), equalTo(40)); + assertThat(getValuesList(results).get(0).get(0), equalTo(1)); } public void testStatsOverConstant() { EsqlQueryResponse results = run("from test | eval x = 1 | stats x = count(x)"); assertThat(results.columns(), equalTo(List.of(new ColumnInfo("x", "long")))); - assertThat(results.values().size(), equalTo(1)); - assertThat(results.values().get(0).get(0), equalTo(40L)); + assertThat(getValuesList(results).size(), equalTo(1)); + assertThat(getValuesList(results).get(0).get(0), equalTo(40L)); } public void testRow() { 
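// getValuesList comes from EsqlTestUtils (note the new static import above); it
// materializes the response rows into a List<List<Object>>. The assertions in this
// file are switched from results.values() to it, presumably so they keep working
// however EsqlQueryResponse hands back its pages.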
long value = randomLongBetween(0, Long.MAX_VALUE); EsqlQueryResponse response = run("row " + value); - assertEquals(List.of(List.of(value)), response.values()); + assertEquals(List.of(List.of(value)), getValuesList(response)); } public void testFromStatsGroupingAvgWithSort() { @@ -120,7 +121,7 @@ private void testFromStatsGroupingAvgImpl(String command, String expectedGroupNa assertEquals("long", groupColumn.type()); // assert column values - List<List<Object>> valueValues = results.values(); + List<List<Object>> valueValues = getValuesList(results); assertEquals(2, valueValues.size()); // This is loathsome, find a declarative way to assert the expected output. if ((long) valueValues.get(0).get(1) == 1L) { @@ -158,7 +159,7 @@ private void testFromStatsGroupingCountImpl(String command, String expectedField assertEquals("long", valuesColumn.type()); // assert column values - List<List<Object>> valueValues = results.values(); + List<List<Object>> valueValues = getValuesList(results); assertEquals(2, valueValues.size()); // This is loathsome, find a declarative way to assert the expected output. if ((long) valueValues.get(0).get(1) == 1L) { @@ -179,7 +180,7 @@ public void testFromStatsGroupingByDate() { EsqlQueryResponse results = run("from test | stats avg(count) by time"); logger.info(results); Assert.assertEquals(2, results.columns().size()); - Assert.assertEquals(40, results.values().size()); + Assert.assertEquals(40, getValuesList(results).size()); // assert column metadata assertEquals("avg(count)", results.columns().get(0).name()); @@ -189,7 +190,11 @@ public void testFromStatsGroupingByDate() { // assert column values List<Long> expectedValues = LongStream.range(0, 40).map(i -> epoch + i).sorted().boxed().toList(); - List<Long> actualValues = IntStream.range(0, 40).mapToLong(i -> (Long) results.values().get(i).get(1)).sorted().boxed().toList(); + List<Long> actualValues = IntStream.range(0, 40) + .mapToLong(i -> (Long) getValuesList(results).get(i).get(1)) + .sorted() + .boxed() + .toList(); assertEquals(expectedValues, actualValues); } @@ -216,7 +221,7 @@ public void testFromGroupingByNumericFieldWithNulls() { record Group(Long data, Double avg) {} List<Group> expectedGroups = List.of(new Group(1L, 42.0), new Group(2L, 44.0), new Group(99L, null), new Group(null, 12.0)); - List<Group> actualGroups = results.values().stream().map(l -> new Group((Long) l.get(1), (Double) l.get(0))).toList(); + List<Group> actualGroups = getValuesList(results).stream().map(l -> new Group((Long) l.get(1), (Double) l.get(0))).toList(); assertThat(actualGroups, equalTo(expectedGroups)); } @@ -224,7 +229,7 @@ public void testFromStatsGroupingByKeyword() { EsqlQueryResponse results = run("from test | stats avg(count) by color"); logger.info(results); Assert.assertEquals(2, results.columns().size()); - Assert.assertEquals(3, results.values().size()); + Assert.assertEquals(3, getValuesList(results).size()); // assert column metadata assertEquals("avg(count)", results.columns().get(0).name()); @@ -235,8 +240,7 @@ record Group(String color, double avg) { } List<Group> expectedGroups = List.of(new Group("blue", 42.0), new Group("green", 44.0), new Group("red", 43)); - List<Group> actualGroups = results.values() - .stream() + List<Group> actualGroups = getValuesList(results).stream() .map(l -> new Group((String) l.get(1), (Double) l.get(0))) .sorted(comparing(c -> c.color)) .toList(); @@ -259,7 +263,7 @@ public void testFromStatsGroupingByKeywordWithNulls() { EsqlQueryResponse results = run("from test | stats avg = avg(" + field + ") by color"); logger.info(results); Assert.assertEquals(2, results.columns().size()); -
Assert.assertEquals(4, results.values().size()); + Assert.assertEquals(4, getValuesList(results).size()); // assert column metadata assertEquals("avg", results.columns().get(0).name()); @@ -275,8 +279,7 @@ record Group(String color, Double avg) { new Group("red", 43.0), new Group("yellow", null) ); - List actualGroups = results.values() - .stream() + List actualGroups = getValuesList(results).stream() .map(l -> new Group((String) l.get(1), (Double) l.get(0))) .sorted(comparing(c -> c.color)) .toList(); @@ -298,7 +301,7 @@ public void testFromStatsMultipleAggs() { ); logger.info(results); Assert.assertEquals(6, results.columns().size()); - Assert.assertEquals(3, results.values().size()); + Assert.assertEquals(3, getValuesList(results).size()); // assert column metadata assertEquals("a", results.columns().get(0).name()); @@ -320,8 +323,7 @@ record Group(double avg, long mi, long ma, long s, long c, String color) {} new Group(43, 40, 46, 860, 20, "red") ); // TODO: each aggregator returns Double now, it should in fact mirror the data type of the fields it's aggregating - List actualGroups = results.values() - .stream() + List actualGroups = getValuesList(results).stream() .map(l -> new Group((Double) l.get(0), (Long) l.get(1), (Long) l.get(2), (Long) l.get(3), (Long) l.get(4), (String) l.get(5))) .sorted(comparing(c -> c.color)) .toList(); @@ -332,7 +334,7 @@ public void testFromSortWithTieBreakerLimit() { EsqlQueryResponse results = run("from test | sort data, count desc, time | limit 5 | keep data, count, time"); logger.info(results); assertThat( - results.values(), + getValuesList(results), contains( List.of(1L, 44L, epoch + 2), List.of(1L, 44L, epoch + 6), @@ -348,7 +350,7 @@ public void testFromStatsProjectGroup() { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("data")); assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("long")); - assertThat(results.values(), containsInAnyOrder(List.of(1L), List.of(2L))); + assertThat(getValuesList(results), containsInAnyOrder(List.of(1L), List.of(2L))); } public void testRowStatsProjectGroupByInt() { @@ -356,7 +358,7 @@ public void testRowStatsProjectGroupByInt() { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("a")); assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("integer")); - assertThat(results.values(), contains(List.of(1))); + assertThat(getValuesList(results), contains(List.of(1))); } public void testRowStatsProjectGroupByLong() { @@ -364,7 +366,7 @@ public void testRowStatsProjectGroupByLong() { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("a")); assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("long")); - assertThat(results.values(), contains(List.of(1000000000000L))); + assertThat(getValuesList(results), contains(List.of(1000000000000L))); } public void testRowStatsProjectGroupByDouble() { @@ -372,7 +374,7 @@ public void testRowStatsProjectGroupByDouble() { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("a")); assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("double")); - assertThat(results.values(), contains(List.of(1.0))); + assertThat(getValuesList(results), contains(List.of(1.0))); } public void testRowStatsProjectGroupByKeyword() { @@ -380,7 +382,7 @@ public void testRowStatsProjectGroupByKeyword() { 
logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("a")); assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("keyword")); - assertThat(results.values(), contains(List.of("hello"))); + assertThat(getValuesList(results), contains(List.of("hello"))); } public void testFromStatsProjectGroupByDouble() { @@ -388,7 +390,7 @@ public void testFromStatsProjectGroupByDouble() { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("data_d")); assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("double")); - assertThat(results.values(), containsInAnyOrder(List.of(1.0), List.of(2.0))); + assertThat(getValuesList(results), containsInAnyOrder(List.of(1.0), List.of(2.0))); } public void testFromStatsProjectGroupWithAlias() { @@ -397,7 +399,7 @@ public void testFromStatsProjectGroupWithAlias() { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("d", "d2")); assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("long", "long")); - assertThat(results.values(), containsInAnyOrder(List.of(1L, 1L), List.of(2L, 2L))); + assertThat(getValuesList(results), containsInAnyOrder(List.of(1L, 1L), List.of(2L, 2L))); } public void testFromStatsProjectAgg() { @@ -405,7 +407,7 @@ public void testFromStatsProjectAgg() { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("a")); assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("double")); - assertThat(results.values(), containsInAnyOrder(List.of(42d), List.of(44d))); + assertThat(getValuesList(results), containsInAnyOrder(List.of(42d), List.of(44d))); } public void testFromStatsProjectAggWithAlias() { @@ -413,7 +415,7 @@ public void testFromStatsProjectAggWithAlias() { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("b")); assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("double")); - assertThat(results.values(), containsInAnyOrder(List.of(42d), List.of(44d))); + assertThat(getValuesList(results), containsInAnyOrder(List.of(42d), List.of(44d))); } public void testFromProjectStatsGroupByAlias() { @@ -421,7 +423,7 @@ public void testFromProjectStatsGroupByAlias() { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("avg(count)", "d")); assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("double", "long")); - assertThat(results.values(), containsInAnyOrder(List.of(42d, 1L), List.of(44d, 2L))); + assertThat(getValuesList(results), containsInAnyOrder(List.of(42d, 1L), List.of(44d, 2L))); } public void testFromProjectStatsAggregateAlias() { @@ -429,36 +431,36 @@ public void testFromProjectStatsAggregateAlias() { logger.info(results); assertThat(results.columns().stream().map(ColumnInfo::name).toList(), contains("avg(c)", "data")); assertThat(results.columns().stream().map(ColumnInfo::type).toList(), contains("double", "long")); - assertThat(results.values(), containsInAnyOrder(List.of(42d, 1L), List.of(44d, 2L))); + assertThat(getValuesList(results), containsInAnyOrder(List.of(42d, 1L), List.of(44d, 2L))); } public void testFromEvalStats() { EsqlQueryResponse results = run("from test | eval ratio = data_d / count_d | stats avg(ratio)"); logger.info(results); Assert.assertEquals(1, results.columns().size()); - 
Assert.assertEquals(1, results.values().size()); + Assert.assertEquals(1, getValuesList(results).size()); assertEquals("avg(ratio)", results.columns().get(0).name()); assertEquals("double", results.columns().get(0).type()); - assertEquals(1, results.values().get(0).size()); - assertEquals(0.034d, (double) results.values().get(0).get(0), 0.001d); + assertEquals(1, getValuesList(results).get(0).size()); + assertEquals(0.034d, (double) getValuesList(results).get(0).get(0), 0.001d); } public void testFromStatsEvalWithPragma() { assumeTrue("pragmas only enabled on snapshot builds", Build.current().isSnapshot()); EsqlQueryResponse results = run("from test | stats avg_count = avg(count) | eval x = avg_count + 7"); logger.info(results); - Assert.assertEquals(1, results.values().size()); - assertEquals(2, results.values().get(0).size()); - assertEquals(50, (double) results.values().get(0).get(results.columns().indexOf(new ColumnInfo("x", "double"))), 1d); - assertEquals(43, (double) results.values().get(0).get(results.columns().indexOf(new ColumnInfo("avg_count", "double"))), 1d); + Assert.assertEquals(1, getValuesList(results).size()); + assertEquals(2, getValuesList(results).get(0).size()); + assertEquals(50, (double) getValuesList(results).get(0).get(results.columns().indexOf(new ColumnInfo("x", "double"))), 1d); + assertEquals(43, (double) getValuesList(results).get(0).get(results.columns().indexOf(new ColumnInfo("avg_count", "double"))), 1d); } public void testWhere() { EsqlQueryResponse results = run("from test | where count > 40"); logger.info(results); - Assert.assertEquals(30, results.values().size()); + Assert.assertEquals(30, getValuesList(results).size()); var countIndex = results.columns().indexOf(new ColumnInfo("count", "long")); - for (List values : results.values()) { + for (List values : getValuesList(results)) { assertThat((Long) values.get(countIndex), greaterThan(40L)); } } @@ -466,9 +468,9 @@ public void testWhere() { public void testProjectWhere() { EsqlQueryResponse results = run("from test | keep count | where count > 40"); logger.info(results); - Assert.assertEquals(30, results.values().size()); + Assert.assertEquals(30, getValuesList(results).size()); int countIndex = results.columns().indexOf(new ColumnInfo("count", "long")); - for (List values : results.values()) { + for (List values : getValuesList(results)) { assertThat((Long) values.get(countIndex), greaterThan(40L)); } } @@ -476,9 +478,9 @@ public void testProjectWhere() { public void testEvalWhere() { EsqlQueryResponse results = run("from test | eval x = count / 2 | where x > 20"); logger.info(results); - Assert.assertEquals(30, results.values().size()); + Assert.assertEquals(30, getValuesList(results).size()); int countIndex = results.columns().indexOf(new ColumnInfo("x", "long")); - for (List values : results.values()) { + for (List values : getValuesList(results)) { assertThat((Long) values.get(countIndex), greaterThan(20L)); } } @@ -486,15 +488,15 @@ public void testEvalWhere() { public void testFilterWithNullAndEval() { EsqlQueryResponse results = run("row a = 1 | eval b = a + null | where b > 1"); logger.info(results); - Assert.assertEquals(0, results.values().size()); + Assert.assertEquals(0, getValuesList(results).size()); } public void testStringLength() { EsqlQueryResponse results = run("from test | eval l = length(color)"); logger.info(results); - assertThat(results.values(), hasSize(40)); + assertThat(getValuesList(results), hasSize(40)); int countIndex = results.columns().indexOf(new ColumnInfo("l", 
"integer")); - for (List values : results.values()) { + for (List values : getValuesList(results)) { assertThat((Integer) values.get(countIndex), greaterThanOrEqualTo(3)); } } @@ -506,11 +508,11 @@ public void testFilterWithNullAndEvalFromIndex() { client().admin().indices().prepareRefresh("test").get(); // sanity EsqlQueryResponse results = run("from test"); - Assert.assertEquals(41, results.values().size()); + Assert.assertEquals(41, getValuesList(results).size()); results = run("from test | eval newCount = count + 1 | where newCount > 1"); logger.info(results); - Assert.assertEquals(40, results.values().size()); + Assert.assertEquals(40, getValuesList(results).size()); assertThat(results.columns(), hasItem(equalTo(new ColumnInfo("count", "long")))); assertThat(results.columns(), hasItem(equalTo(new ColumnInfo("count_d", "double")))); assertThat(results.columns(), hasItem(equalTo(new ColumnInfo("data", "long")))); @@ -520,7 +522,7 @@ public void testFilterWithNullAndEvalFromIndex() { // restore index to original pre-test state client().prepareBulk().add(new DeleteRequest("test").id("no_count")).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE).get(); results = run("from test"); - Assert.assertEquals(40, results.values().size()); + Assert.assertEquals(40, getValuesList(results).size()); } public void testMultiConditionalWhere() { @@ -528,9 +530,9 @@ public void testMultiConditionalWhere() { "from test | eval abc = 1+2 | where (abc + count >= 44 or data_d == 2) and data == 1 | keep color, abc" ); logger.info(results); - Assert.assertEquals(10, results.values().size()); + Assert.assertEquals(10, getValuesList(results).size()); Assert.assertEquals(2, results.columns().size()); - for (List values : results.values()) { + for (List values : getValuesList(results)) { assertThat((String) values.get(0), equalTo("green")); assertThat((Integer) values.get(1), equalTo(3)); } @@ -539,9 +541,9 @@ public void testMultiConditionalWhere() { public void testWhereNegatedCondition() { EsqlQueryResponse results = run("from test | eval abc=1+2 | where abc + count > 45 and data != 1 | keep color, data"); logger.info(results); - Assert.assertEquals(10, results.values().size()); + Assert.assertEquals(10, getValuesList(results).size()); Assert.assertEquals(2, results.columns().size()); - for (List values : results.values()) { + for (List values : getValuesList(results)) { assertThat((String) values.get(0), equalTo("red")); assertThat((Long) values.get(1), equalTo(2L)); } @@ -550,11 +552,11 @@ public void testWhereNegatedCondition() { public void testEvalOverride() { EsqlQueryResponse results = run("from test | eval count = count + 1 | eval count = count + 1"); logger.info(results); - Assert.assertEquals(40, results.values().size()); + Assert.assertEquals(40, getValuesList(results).size()); Assert.assertEquals(1, results.columns().stream().filter(c -> c.name().equals("count")).count()); int countIndex = results.columns().size() - 1; Assert.assertEquals(new ColumnInfo("count", "long"), results.columns().get(countIndex)); - for (List values : results.values()) { + for (List values : getValuesList(results)) { assertThat((Long) values.get(countIndex), greaterThanOrEqualTo(42L)); } } @@ -562,9 +564,9 @@ public void testEvalOverride() { public void testProjectRename() { EsqlQueryResponse results = run("from test | eval y = count | rename count as x | keep x, y"); logger.info(results); - Assert.assertEquals(40, results.values().size()); + Assert.assertEquals(40, getValuesList(results).size()); 
assertThat(results.columns(), contains(new ColumnInfo("x", "long"), new ColumnInfo("y", "long"))); - for (List values : results.values()) { + for (List values : getValuesList(results)) { assertThat((Long) values.get(0), greaterThanOrEqualTo(40L)); assertThat(values.get(1), is(values.get(0))); } @@ -573,12 +575,12 @@ public void testProjectRename() { public void testProjectRenameEval() { EsqlQueryResponse results = run("from test | eval y = count | rename count as x | keep x, y | eval x2 = x + 1 | eval y2 = y + 2"); logger.info(results); - Assert.assertEquals(40, results.values().size()); + Assert.assertEquals(40, getValuesList(results).size()); assertThat( results.columns(), contains(new ColumnInfo("x", "long"), new ColumnInfo("y", "long"), new ColumnInfo("x2", "long"), new ColumnInfo("y2", "long")) ); - for (List values : results.values()) { + for (List values : getValuesList(results)) { assertThat((Long) values.get(0), greaterThanOrEqualTo(40L)); assertThat(values.get(1), is(values.get(0))); assertThat(values.get(2), is(((Long) values.get(0)) + 1)); @@ -589,9 +591,9 @@ public void testProjectRenameEval() { public void testProjectRenameEvalProject() { EsqlQueryResponse results = run("from test | eval y = count | rename count as x | keep x, y | eval z = x + y | keep x, y, z"); logger.info(results); - Assert.assertEquals(40, results.values().size()); + Assert.assertEquals(40, getValuesList(results).size()); assertThat(results.columns(), contains(new ColumnInfo("x", "long"), new ColumnInfo("y", "long"), new ColumnInfo("z", "long"))); - for (List values : results.values()) { + for (List values : getValuesList(results)) { assertThat((Long) values.get(0), greaterThanOrEqualTo(40L)); assertThat(values.get(1), is(values.get(0))); assertThat(values.get(2), is((Long) values.get(0) * 2)); @@ -601,9 +603,9 @@ public void testProjectRenameEvalProject() { public void testProjectOverride() { EsqlQueryResponse results = run("from test | eval cnt = count | rename count as data | keep cnt, data"); logger.info(results); - Assert.assertEquals(40, results.values().size()); + Assert.assertEquals(40, getValuesList(results).size()); assertThat(results.columns(), contains(new ColumnInfo("cnt", "long"), new ColumnInfo("data", "long"))); - for (List values : results.values()) { + for (List values : getValuesList(results)) { assertThat(values.get(1), is(values.get(0))); } } @@ -674,7 +676,7 @@ public void testRefreshSearchIdleShards() throws Exception { } EsqlQueryResponse results = run("from test_refresh | stats s = sum(value)"); logger.info(results); - assertThat(results.values().get(0), equalTo(List.of(totalValues.get()))); + assertThat(getValuesList(results).get(0), equalTo(List.of(totalValues.get()))); } public void testESFilter() throws Exception { @@ -708,9 +710,9 @@ public void testESFilter() throws Exception { logger.info(results); OptionalDouble avg = docs.values().stream().filter(v -> from <= v && v <= to).mapToLong(n -> n).average(); if (avg.isPresent()) { - assertEquals(avg.getAsDouble(), (double) results.values().get(0).get(0), 0.01d); + assertEquals(avg.getAsDouble(), (double) getValuesList(results).get(0).get(0), 0.01d); } else { - assertThat(results.values().get(0).get(0), nullValue()); + assertThat(getValuesList(results).get(0).get(0), nullValue()); } } @@ -744,12 +746,12 @@ record Doc(long val, String tag) { logger.info(results); // _doc, _segment, _shard are pruned assertThat(results.columns().size(), equalTo(2)); - assertThat(results.values(), hasSize(Math.min(limit, numDocs))); + 
assertThat(getValuesList(results), hasSize(Math.min(limit, numDocs))); assertThat(results.columns().get(1).name(), equalTo("val")); assertThat(results.columns().get(0).name(), equalTo("tag")); List actualDocs = new ArrayList<>(); - for (int i = 0; i < results.values().size(); i++) { - List values = results.values().get(i); + for (int i = 0; i < getValuesList(results).size(); i++) { + List values = getValuesList(results).get(i); actualDocs.add(new Doc((Long) values.get(1), (String) values.get(0))); } assertThat(actualDocs, equalTo(allDocs.stream().limit(limit).toList())); @@ -759,25 +761,25 @@ public void testEvalWithNullAndAvg() { EsqlQueryResponse results = run("from test | eval nullsum = count_d + null | stats avg(nullsum)"); logger.info(results); Assert.assertEquals(1, results.columns().size()); - Assert.assertEquals(1, results.values().size()); + Assert.assertEquals(1, getValuesList(results).size()); assertEquals("avg(nullsum)", results.columns().get(0).name()); assertEquals("double", results.columns().get(0).type()); - assertEquals(1, results.values().get(0).size()); - assertNull(results.values().get(0).get(0)); + assertEquals(1, getValuesList(results).get(0).size()); + assertNull(getValuesList(results).get(0).get(0)); } public void testFromStatsLimit() { EsqlQueryResponse results = run("from test | stats ac = avg(count) by data | limit 1"); logger.info(results); assertThat(results.columns(), contains(new ColumnInfo("ac", "double"), new ColumnInfo("data", "long"))); - assertThat(results.values(), contains(anyOf(contains(42.0, 1L), contains(44.0, 2L)))); + assertThat(getValuesList(results), contains(anyOf(contains(42.0, 1L), contains(44.0, 2L)))); } public void testFromLimit() { EsqlQueryResponse results = run("from test | keep data | limit 2"); logger.info(results); assertThat(results.columns(), contains(new ColumnInfo("data", "long"))); - assertThat(results.values(), contains(anyOf(contains(1L), contains(2L)), anyOf(contains(1L), contains(2L)))); + assertThat(getValuesList(results), contains(anyOf(contains(1L), contains(2L)), anyOf(contains(1L), contains(2L)))); } public void testDropAllColumns() { @@ -785,14 +787,14 @@ public void testDropAllColumns() { logger.info(results); assertThat(results.columns(), hasSize(1)); assertThat(results.columns(), contains(new ColumnInfo("a", "integer"))); - assertThat(results.values(), is(empty())); + assertThat(getValuesList(results), is(empty())); } public void testDropAllColumnsWithStats() { EsqlQueryResponse results = run("from test | stats g = count(data) | drop g"); logger.info(results); assertThat(results.columns(), is(empty())); - assertThat(results.values(), is(empty())); + assertThat(getValuesList(results), is(empty())); } public void testIndexPatterns() throws Exception { @@ -819,34 +821,34 @@ public void testIndexPatterns() throws Exception { } EsqlQueryResponse results = run("from test_index_patterns* | stats count(data), sum(count)"); - assertEquals(1, results.values().size()); - assertEquals(15L, results.values().get(0).get(0)); - assertEquals(120000L, results.values().get(0).get(1)); + assertEquals(1, getValuesList(results).size()); + assertEquals(15L, getValuesList(results).get(0).get(0)); + assertEquals(120000L, getValuesList(results).get(0).get(1)); results = run("from test_index_patterns_1,test_index_patterns_2 | stats count(data), sum(count)"); - assertEquals(1, results.values().size()); - assertEquals(10L, results.values().get(0).get(0)); - assertEquals(55000L, results.values().get(0).get(1)); + assertEquals(1, 
getValuesList(results).size()); + assertEquals(10L, getValuesList(results).get(0).get(0)); + assertEquals(55000L, getValuesList(results).get(0).get(1)); results = run("from test_index_patterns_1*,test_index_patterns_2* | stats count(data), sum(count)"); - assertEquals(1, results.values().size()); - assertEquals(10L, results.values().get(0).get(0)); - assertEquals(55000L, results.values().get(0).get(1)); + assertEquals(1, getValuesList(results).size()); + assertEquals(10L, getValuesList(results).get(0).get(0)); + assertEquals(55000L, getValuesList(results).get(0).get(1)); results = run("from test_index_patterns_*,-test_index_patterns_1 | stats count(data), sum(count)"); - assertEquals(1, results.values().size()); - assertEquals(10L, results.values().get(0).get(0)); - assertEquals(105000L, results.values().get(0).get(1)); + assertEquals(1, getValuesList(results).size()); + assertEquals(10L, getValuesList(results).get(0).get(0)); + assertEquals(105000L, getValuesList(results).get(0).get(1)); results = run("from * | stats count(data), sum(count)"); - assertEquals(1, results.values().size()); - assertEquals(55L, results.values().get(0).get(0)); - assertEquals(121720L, results.values().get(0).get(1)); + assertEquals(1, getValuesList(results).size()); + assertEquals(55L, getValuesList(results).get(0).get(0)); + assertEquals(121720L, getValuesList(results).get(0).get(1)); results = run("from test_index_patterns_2 | stats count(data), sum(count)"); - assertEquals(1, results.values().size()); - assertEquals(5L, results.values().get(0).get(0)); - assertEquals(40000L, results.values().get(0).get(1)); + assertEquals(1, getValuesList(results).size()); + assertEquals(5L, getValuesList(results).get(0).get(0)); + assertEquals(40000L, getValuesList(results).get(0).get(1)); } public void testOverlappingIndexPatterns() throws Exception { @@ -887,7 +889,7 @@ public void testEmptyIndex() { assertAcked(client().admin().indices().prepareCreate("test_empty").setMapping("k", "type=keyword", "v", "type=long").get()); EsqlQueryResponse results = run("from test_empty"); assertThat(results.columns(), equalTo(List.of(new ColumnInfo("k", "keyword"), new ColumnInfo("v", "long")))); - assertThat(results.values(), empty()); + assertThat(getValuesList(results), empty()); } public void testShowInfo() { @@ -896,22 +898,22 @@ public void testShowInfo() { results.columns(), equalTo(List.of(new ColumnInfo("version", "keyword"), new ColumnInfo("date", "keyword"), new ColumnInfo("hash", "keyword"))) ); - assertThat(results.values().size(), equalTo(1)); - assertThat(results.values().get(0).get(0), equalTo(Build.current().version())); - assertThat(results.values().get(0).get(1), equalTo(Build.current().date())); - assertThat(results.values().get(0).get(2), equalTo(Build.current().hash())); + assertThat(getValuesList(results).size(), equalTo(1)); + assertThat(getValuesList(results).get(0).get(0), equalTo(Build.current().version())); + assertThat(getValuesList(results).get(0).get(1), equalTo(Build.current().date())); + assertThat(getValuesList(results).get(0).get(2), equalTo(Build.current().hash())); } public void testShowFunctions() { EsqlQueryResponse results = run("show functions"); assertThat(results.columns(), equalTo(List.of(new ColumnInfo("name", "keyword"), new ColumnInfo("synopsis", "keyword")))); - assertThat(results.values().size(), equalTo(new EsqlFunctionRegistry().listFunctions().size())); + assertThat(getValuesList(results).size(), equalTo(new EsqlFunctionRegistry().listFunctions().size())); } public void 
testInWithNullValue() { EsqlQueryResponse results = run("from test | where null in (data, 2) | keep data"); assertThat(results.columns(), equalTo(List.of(new ColumnInfo("data", "long")))); - assertThat(results.values().size(), equalTo(0)); + assertThat(getValuesList(results).size(), equalTo(0)); } public void testTopNPushedToLucene() { @@ -948,7 +950,7 @@ public void testTopNPushedToLucene() { """); logger.info(results); Assert.assertEquals(3, results.columns().size()); - Assert.assertEquals(10, results.values().size()); + Assert.assertEquals(10, getValuesList(results).size()); // assert column metadata assertEquals("data", results.columns().get(0).name()); @@ -975,8 +977,7 @@ record Group(Long data, Long count, String color) { new Group(9L, null), new Group(9L, 90L) ); - List<Group> actualGroups = results.values() - .stream() + List<Group> actualGroups = getValuesList(results).stream() .map(l -> new Group((Long) l.get(0), (Long) l.get(1), (String) l.get(2))) .toList(); assertThat(actualGroups, equalTo(expectedGroups)); @@ -1002,7 +1003,7 @@ public void testTopNPushedToLuceneOnSortedIndex() { EsqlQueryResponse results = run("from sorted_test_index | sort time " + sortOrder + " | limit " + limit + " | keep time"); logger.info(results); Assert.assertEquals(1, results.columns().size()); - Assert.assertEquals(limit, results.values().size()); + Assert.assertEquals(limit, getValuesList(results).size()); // assert column metadata assertEquals("time", results.columns().get(0).name()); @@ -1015,7 +1016,7 @@ public void testTopNPushedToLuceneOnSortedIndex() { .sorted(sortedDesc ? reverseOrder() : naturalOrder()) .limit(limit) .toList(); - var actual = results.values().stream().map(l -> (Long) l.get(0)).toList(); + var actual = getValuesList(results).stream().map(l -> (Long) l.get(0)).toList(); assertThat(actual, equalTo(expected)); // clean-up @@ -1129,8 +1130,8 @@ private void assertNoNestedDocuments(String query, int docsCount, long minValue, EsqlQueryResponse results = run(query); assertThat(results.columns(), contains(new ColumnInfo("data", "long"))); assertThat(results.columns().size(), is(1)); - assertThat(results.values().size(), is(docsCount)); - for (List<Object> row : results.values()) { + assertThat(getValuesList(results).size(), is(docsCount)); + for (List<Object> row : getValuesList(results)) { assertThat(row.size(), is(1)); // check that all the values returned are the regular ones assertThat((Long) row.get(0), allOf(greaterThanOrEqualTo(minValue), lessThanOrEqualTo(maxValue))); diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionRuntimeFieldIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionRuntimeFieldIT.java index 6b271debcf2c3..bdbcd9a548f58 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionRuntimeFieldIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlActionRuntimeFieldIT.java @@ -34,6 +34,7 @@ import java.util.Set; import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; import static org.hamcrest.Matchers.equalTo; /** @@ -53,19 +54,19 @@ protected Collection<Class<? extends Plugin>> nodePlugins() { public void testLong() throws InterruptedException, IOException { createIndexWithConstRuntimeField("long"); EsqlQueryResponse response = run("from test | stats sum(const)"); - assertThat(response.values(), equalTo(List.of(List.of((long)
SIZE)))); + assertThat(getValuesList(response), equalTo(List.of(List.of((long) SIZE)))); } public void testDouble() throws InterruptedException, IOException { createIndexWithConstRuntimeField("double"); EsqlQueryResponse response = run("from test | stats sum(const)"); - assertThat(response.values(), equalTo(List.of(List.of((double) SIZE)))); + assertThat(getValuesList(response), equalTo(List.of(List.of((double) SIZE)))); } public void testKeyword() throws InterruptedException, IOException { createIndexWithConstRuntimeField("keyword"); EsqlQueryResponse response = run("from test | keep const | limit 1"); - assertThat(response.values(), equalTo(List.of(List.of("const")))); + assertThat(getValuesList(response), equalTo(List.of(List.of("const")))); } /** @@ -75,20 +76,20 @@ public void testKeyword() throws InterruptedException, IOException { public void testKeywordBy() throws InterruptedException, IOException { createIndexWithConstRuntimeField("keyword"); EsqlQueryResponse response = run("from test | stats max(foo) by const"); - assertThat(response.values(), equalTo(List.of(List.of(SIZE - 1L, "const")))); + assertThat(getValuesList(response), equalTo(List.of(List.of(SIZE - 1L, "const")))); } public void testBoolean() throws InterruptedException, IOException { createIndexWithConstRuntimeField("boolean"); EsqlQueryResponse response = run("from test | sort foo | limit 3"); - assertThat(response.values(), equalTo(List.of(List.of(true, 0L), List.of(true, 1L), List.of(true, 2L)))); + assertThat(getValuesList(response), equalTo(List.of(List.of(true, 0L), List.of(true, 1L), List.of(true, 2L)))); } public void testDate() throws InterruptedException, IOException { createIndexWithConstRuntimeField("date"); EsqlQueryResponse response = run(""" from test | eval d=date_format(const, "yyyy") | stats min (foo) by d"""); - assertThat(response.values(), equalTo(List.of(List.of(0L, "2023")))); + assertThat(getValuesList(response), equalTo(List.of(List.of(0L, "2023")))); } private void createIndexWithConstRuntimeField(String type) throws InterruptedException, IOException { diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java index 3aaf06ead7ee5..13400890c4e47 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/EsqlDisruptionIT.java @@ -42,7 +42,7 @@ public class EsqlDisruptionIT extends EsqlActionIT { .put(TransportSettings.CONNECT_TIMEOUT.getKey(), "10s") // Network delay disruption waits for the min between this // value and the time of disruption and does not recover immediately // when disruption is stopped. We should make sure we recover faster - // then the default of 30s, causing ensureGreen and friends to time out + // than the default of 30s, causing ensureGreen and friends to time out .build(); @Override diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/ManyShardsIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/ManyShardsIT.java new file mode 100644 index 0000000000000..a95f601d88ca0 --- /dev/null +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/action/ManyShardsIT.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V.
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.action; + +import org.apache.lucene.tests.util.LuceneTestCase; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.esql.plugin.QueryPragmas; + +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; + +/** + * Makes sure that we can run many concurrent requests with a large number of shards with any data_partitioning. + */ +@LuceneTestCase.SuppressFileSystems(value = "HandleLimitFS") +public class ManyShardsIT extends AbstractEsqlIntegTestCase { + + public void testConcurrentQueries() throws Exception { + int numIndices = between(10, 20); + for (int i = 0; i < numIndices; i++) { + String index = "test-" + i; + client().admin() + .indices() + .prepareCreate(index) + .setSettings( + Settings.builder() + .put("index.shard.check_on_startup", "false") + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, between(1, 5)) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + ) + .setMapping("user", "type=keyword", "tags", "type=keyword") + .get(); + BulkRequestBuilder bulk = client().prepareBulk(index).setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + int numDocs = between(5, 10); + for (int d = 0; d < numDocs; d++) { + String user = randomFrom("u1", "u2", "u3"); + String tag = randomFrom("java", "elasticsearch", "lucene"); + bulk.add(new IndexRequest().source(Map.of("user", user, "tags", tag))); + } + bulk.get(); + } + int numQueries = between(10, 20); + Thread[] threads = new Thread[numQueries]; + CountDownLatch latch = new CountDownLatch(1); + for (int q = 0; q < numQueries; q++) { + threads[q] = new Thread(() -> { + try { + assertTrue(latch.await(1, TimeUnit.MINUTES)); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + var pragmas = Settings.builder() + .put(randomPragmas().getSettings()) + .put("exchange_concurrent_clients", between(1, 2)) + .build(); + run("from test-* | stats count(user) by tags", new QueryPragmas(pragmas)); + }); + } + for (Thread thread : threads) { + thread.start(); + } + latch.countDown(); + for (Thread thread : threads) { + thread.join(); + } + } +} diff --git a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/CanMatchIT.java b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/CanMatchIT.java index b88ba60b47b3b..d4d6e8100f152 100644 --- a/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/CanMatchIT.java +++ b/x-pack/plugin/esql/src/internalClusterTest/java/org/elasticsearch/xpack/esql/plugin/CanMatchIT.java @@ -27,6 +27,7 @@ import java.util.List; import java.util.Set; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.getValuesList; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.empty; import static org.hamcrest.Matchers.equalTo; @@ -78,22 +79,22 @@ public void testCanMatch() { }); } EsqlQueryResponse resp = run("from events_*", randomPragmas(), new RangeQueryBuilder("@timestamp").gte("2023-01-01")); - assertThat(resp.values(),
hasSize(4)); + assertThat(getValuesList(resp), hasSize(4)); assertThat(queriedIndices, equalTo(Set.of("events_2023"))); queriedIndices.clear(); resp = run("from events_*", randomPragmas(), new RangeQueryBuilder("@timestamp").lt("2023-01-01")); - assertThat(resp.values(), hasSize(3)); + assertThat(getValuesList(resp), hasSize(3)); assertThat(queriedIndices, equalTo(Set.of("events_2022"))); queriedIndices.clear(); resp = run("from events_*", randomPragmas(), new RangeQueryBuilder("@timestamp").gt("2022-01-01").lt("2023-12-31")); - assertThat(resp.values(), hasSize(7)); + assertThat(getValuesList(resp), hasSize(7)); assertThat(queriedIndices, equalTo(Set.of("events_2022", "events_2023"))); queriedIndices.clear(); resp = run("from events_*", randomPragmas(), new RangeQueryBuilder("@timestamp").gt("2021-01-01").lt("2021-12-31")); - assertThat(resp.values(), hasSize(0)); + assertThat(getValuesList(resp), hasSize(0)); assertThat(queriedIndices, empty()); queriedIndices.clear(); @@ -131,47 +132,47 @@ public void testAliasFilters() { EsqlQueryResponse resp; // employees index resp = run("from employees | stats count(emp_no)", randomPragmas()); - assertThat(resp.values().get(0), equalTo(List.of(6L))); + assertThat(getValuesList(resp).get(0), equalTo(List.of(6L))); resp = run("from employees | stats avg(salary)", randomPragmas()); - assertThat(resp.values().get(0), equalTo(List.of(26.95d))); + assertThat(getValuesList(resp).get(0), equalTo(List.of(26.95d))); resp = run("from employees | stats count(emp_no)", randomPragmas(), new RangeQueryBuilder("hired").lt("2012-04-30")); - assertThat(resp.values().get(0), equalTo(List.of(4L))); + assertThat(getValuesList(resp).get(0), equalTo(List.of(4L))); resp = run("from employees | stats avg(salary)", randomPragmas(), new RangeQueryBuilder("hired").lt("2012-04-30")); - assertThat(resp.values().get(0), equalTo(List.of(26.65d))); + assertThat(getValuesList(resp).get(0), equalTo(List.of(26.65d))); // match both employees index and engineers alias -> employees resp = run("from e* | stats count(emp_no)", randomPragmas()); - assertThat(resp.values().get(0), equalTo(List.of(6L))); + assertThat(getValuesList(resp).get(0), equalTo(List.of(6L))); resp = run("from employees | stats avg(salary)", randomPragmas()); - assertThat(resp.values().get(0), equalTo(List.of(26.95d))); + assertThat(getValuesList(resp).get(0), equalTo(List.of(26.95d))); resp = run("from e* | stats count(emp_no)", randomPragmas(), new RangeQueryBuilder("hired").lt("2012-04-30")); - assertThat(resp.values().get(0), equalTo(List.of(4L))); + assertThat(getValuesList(resp).get(0), equalTo(List.of(4L))); resp = run("from e* | stats avg(salary)", randomPragmas(), new RangeQueryBuilder("hired").lt("2012-04-30")); - assertThat(resp.values().get(0), equalTo(List.of(26.65d))); + assertThat(getValuesList(resp).get(0), equalTo(List.of(26.65d))); // engineers alias resp = run("from engineer* | stats count(emp_no)", randomPragmas()); - assertThat(resp.values().get(0), equalTo(List.of(4L))); + assertThat(getValuesList(resp).get(0), equalTo(List.of(4L))); resp = run("from engineer* | stats avg(salary)", randomPragmas()); - assertThat(resp.values().get(0), equalTo(List.of(26.65d))); + assertThat(getValuesList(resp).get(0), equalTo(List.of(26.65d))); resp = run("from engineer* | stats count(emp_no)", randomPragmas(), new RangeQueryBuilder("hired").lt("2012-04-30")); - assertThat(resp.values().get(0), equalTo(List.of(3L))); + assertThat(getValuesList(resp).get(0), equalTo(List.of(3L))); resp = run("from engineer* | 
stats avg(salary)", randomPragmas(), new RangeQueryBuilder("hired").lt("2012-04-30")); - assertThat(resp.values().get(0), equalTo(List.of(27.2d))); + assertThat(getValuesList(resp).get(0), equalTo(List.of(27.2d))); // sales alias resp = run("from sales | stats count(emp_no)", randomPragmas()); - assertThat(resp.values().get(0), equalTo(List.of(2L))); + assertThat(getValuesList(resp).get(0), equalTo(List.of(2L))); resp = run("from sales | stats avg(salary)", randomPragmas()); - assertThat(resp.values().get(0), equalTo(List.of(27.55d))); + assertThat(getValuesList(resp).get(0), equalTo(List.of(27.55d))); resp = run("from sales | stats count(emp_no)", randomPragmas(), new RangeQueryBuilder("hired").lt("2012-04-30")); - assertThat(resp.values().get(0), equalTo(List.of(1L))); + assertThat(getValuesList(resp).get(0), equalTo(List.of(1L))); resp = run("from sales | stats avg(salary)", randomPragmas(), new RangeQueryBuilder("hired").lt("2012-04-30")); - assertThat(resp.values().get(0), equalTo(List.of(25.0d))); + assertThat(getValuesList(resp).get(0), equalTo(List.of(25.0d))); } public void testFailOnUnavailableShards() throws Exception { @@ -211,7 +212,7 @@ public void testFailOnUnavailableShards() throws Exception { .add(new IndexRequest().source("timestamp", 11, "message", "bb")) .get(); EsqlQueryResponse resp = run("from events,logs | KEEP timestamp,message"); - assertThat(resp.values(), hasSize(5)); + assertThat(getValuesList(resp), hasSize(5)); internalCluster().stopNode(logsOnlyNode); ensureClusterSizeConsistency(); Exception error = expectThrows(Exception.class, () -> run("from events,logs | KEEP timestamp,message")); diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftEvaluator.java new file mode 100644 index 0000000000000..23ccb4d544331 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftEvaluator.java @@ -0,0 +1,93 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.UnicodeUtil; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.EvalOperator; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link Left}. + * This class is generated. Do not edit it. 
+ */ +public final class LeftEvaluator implements EvalOperator.ExpressionEvaluator { + private final BytesRef out; + + private final UnicodeUtil.UTF8CodePoint cp; + + private final EvalOperator.ExpressionEvaluator str; + + private final EvalOperator.ExpressionEvaluator length; + + public LeftEvaluator(BytesRef out, UnicodeUtil.UTF8CodePoint cp, + EvalOperator.ExpressionEvaluator str, EvalOperator.ExpressionEvaluator length) { + this.out = out; + this.cp = cp; + this.str = str; + this.length = length; + } + + @Override + public Block eval(Page page) { + Block strUncastBlock = str.eval(page); + if (strUncastBlock.areAllValuesNull()) { + return Block.constantNullBlock(page.getPositionCount()); + } + BytesRefBlock strBlock = (BytesRefBlock) strUncastBlock; + Block lengthUncastBlock = length.eval(page); + if (lengthUncastBlock.areAllValuesNull()) { + return Block.constantNullBlock(page.getPositionCount()); + } + IntBlock lengthBlock = (IntBlock) lengthUncastBlock; + BytesRefVector strVector = strBlock.asVector(); + if (strVector == null) { + return eval(page.getPositionCount(), strBlock, lengthBlock); + } + IntVector lengthVector = lengthBlock.asVector(); + if (lengthVector == null) { + return eval(page.getPositionCount(), strBlock, lengthBlock); + } + return eval(page.getPositionCount(), strVector, lengthVector).asBlock(); + } + + public BytesRefBlock eval(int positionCount, BytesRefBlock strBlock, IntBlock lengthBlock) { + BytesRefBlock.Builder result = BytesRefBlock.newBlockBuilder(positionCount); + BytesRef strScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (strBlock.isNull(p) || strBlock.getValueCount(p) != 1) { + result.appendNull(); + continue position; + } + if (lengthBlock.isNull(p) || lengthBlock.getValueCount(p) != 1) { + result.appendNull(); + continue position; + } + result.appendBytesRef(Left.process(out, cp, strBlock.getBytesRef(strBlock.getFirstValueIndex(p), strScratch), lengthBlock.getInt(lengthBlock.getFirstValueIndex(p)))); + } + return result.build(); + } + + public BytesRefVector eval(int positionCount, BytesRefVector strVector, IntVector lengthVector) { + BytesRefVector.Builder result = BytesRefVector.newVectorBuilder(positionCount); + BytesRef strScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + result.appendBytesRef(Left.process(out, cp, strVector.getBytesRef(p, strScratch), lengthVector.getInt(p))); + } + return result.build(); + } + + @Override + public String toString() { + return "LeftEvaluator[" + "str=" + str + ", length=" + length + "]"; + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightEvaluator.java new file mode 100644 index 0000000000000..f075c37d05dd9 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightEvaluator.java @@ -0,0 +1,93 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. 
+package org.elasticsearch.xpack.esql.expression.function.scalar.string; + +import java.lang.Override; +import java.lang.String; +import org.apache.lucene.util.BytesRef; +import org.apache.lucene.util.UnicodeUtil; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.BytesRefBlock; +import org.elasticsearch.compute.data.BytesRefVector; +import org.elasticsearch.compute.data.IntBlock; +import org.elasticsearch.compute.data.IntVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.EvalOperator; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link Right}. + * This class is generated. Do not edit it. + */ +public final class RightEvaluator implements EvalOperator.ExpressionEvaluator { + private final BytesRef out; + + private final UnicodeUtil.UTF8CodePoint cp; + + private final EvalOperator.ExpressionEvaluator str; + + private final EvalOperator.ExpressionEvaluator length; + + public RightEvaluator(BytesRef out, UnicodeUtil.UTF8CodePoint cp, + EvalOperator.ExpressionEvaluator str, EvalOperator.ExpressionEvaluator length) { + this.out = out; + this.cp = cp; + this.str = str; + this.length = length; + } + + @Override + public Block eval(Page page) { + Block strUncastBlock = str.eval(page); + if (strUncastBlock.areAllValuesNull()) { + return Block.constantNullBlock(page.getPositionCount()); + } + BytesRefBlock strBlock = (BytesRefBlock) strUncastBlock; + Block lengthUncastBlock = length.eval(page); + if (lengthUncastBlock.areAllValuesNull()) { + return Block.constantNullBlock(page.getPositionCount()); + } + IntBlock lengthBlock = (IntBlock) lengthUncastBlock; + BytesRefVector strVector = strBlock.asVector(); + if (strVector == null) { + return eval(page.getPositionCount(), strBlock, lengthBlock); + } + IntVector lengthVector = lengthBlock.asVector(); + if (lengthVector == null) { + return eval(page.getPositionCount(), strBlock, lengthBlock); + } + return eval(page.getPositionCount(), strVector, lengthVector).asBlock(); + } + + public BytesRefBlock eval(int positionCount, BytesRefBlock strBlock, IntBlock lengthBlock) { + BytesRefBlock.Builder result = BytesRefBlock.newBlockBuilder(positionCount); + BytesRef strScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + if (strBlock.isNull(p) || strBlock.getValueCount(p) != 1) { + result.appendNull(); + continue position; + } + if (lengthBlock.isNull(p) || lengthBlock.getValueCount(p) != 1) { + result.appendNull(); + continue position; + } + result.appendBytesRef(Right.process(out, cp, strBlock.getBytesRef(strBlock.getFirstValueIndex(p), strScratch), lengthBlock.getInt(lengthBlock.getFirstValueIndex(p)))); + } + return result.build(); + } + + public BytesRefVector eval(int positionCount, BytesRefVector strVector, IntVector lengthVector) { + BytesRefVector.Builder result = BytesRefVector.newVectorBuilder(positionCount); + BytesRef strScratch = new BytesRef(); + position: for (int p = 0; p < positionCount; p++) { + result.appendBytesRef(Right.process(out, cp, strVector.getBytesRef(p, strScratch), lengthVector.getInt(p))); + } + return result.build(); + } + + @Override + public String toString() { + return "RightEvaluator[" + "str=" + str + ", length=" + length + "]"; + } +} diff --git a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddDatetimesEvaluator.java 
b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddDatetimesEvaluator.java new file mode 100644 index 0000000000000..84d8809454f8d --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddDatetimesEvaluator.java @@ -0,0 +1,86 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic; + +import java.lang.ArithmeticException; +import java.lang.Override; +import java.lang.String; +import java.time.DateTimeException; +import java.time.temporal.TemporalAmount; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link Add}. + * This class is generated. Do not edit it. + */ +public final class AddDatetimesEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator datetime; + + private final TemporalAmount temporalAmount; + + public AddDatetimesEvaluator(Source source, EvalOperator.ExpressionEvaluator datetime, + TemporalAmount temporalAmount) { + this.warnings = new Warnings(source); + this.datetime = datetime; + this.temporalAmount = temporalAmount; + } + + @Override + public Block eval(Page page) { + Block datetimeUncastBlock = datetime.eval(page); + if (datetimeUncastBlock.areAllValuesNull()) { + return Block.constantNullBlock(page.getPositionCount()); + } + LongBlock datetimeBlock = (LongBlock) datetimeUncastBlock; + LongVector datetimeVector = datetimeBlock.asVector(); + if (datetimeVector == null) { + return eval(page.getPositionCount(), datetimeBlock); + } + return eval(page.getPositionCount(), datetimeVector); + } + + public LongBlock eval(int positionCount, LongBlock datetimeBlock) { + LongBlock.Builder result = LongBlock.newBlockBuilder(positionCount); + position: for (int p = 0; p < positionCount; p++) { + if (datetimeBlock.isNull(p) || datetimeBlock.getValueCount(p) != 1) { + result.appendNull(); + continue position; + } + try { + result.appendLong(Add.processDatetimes(datetimeBlock.getLong(datetimeBlock.getFirstValueIndex(p)), temporalAmount)); + } catch (ArithmeticException | DateTimeException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + + public LongBlock eval(int positionCount, LongVector datetimeVector) { + LongBlock.Builder result = LongBlock.newBlockBuilder(positionCount); + position: for (int p = 0; p < positionCount; p++) { + try { + result.appendLong(Add.processDatetimes(datetimeVector.getLong(p), temporalAmount)); + } catch (ArithmeticException | DateTimeException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + + @Override + public String toString() { + return "AddDatetimesEvaluator[" + "datetime=" + datetime + ", temporalAmount=" + temporalAmount + "]"; + } +} diff --git 
a/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubDatetimesEvaluator.java b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubDatetimesEvaluator.java new file mode 100644 index 0000000000000..d6e94ce2218ed --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/generated/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubDatetimesEvaluator.java @@ -0,0 +1,86 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License +// 2.0; you may not use this file except in compliance with the Elastic License +// 2.0. +package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic; + +import java.lang.ArithmeticException; +import java.lang.Override; +import java.lang.String; +import java.time.DateTimeException; +import java.time.temporal.TemporalAmount; +import org.elasticsearch.compute.data.Block; +import org.elasticsearch.compute.data.LongBlock; +import org.elasticsearch.compute.data.LongVector; +import org.elasticsearch.compute.data.Page; +import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.expression.function.Warnings; +import org.elasticsearch.xpack.ql.tree.Source; + +/** + * {@link EvalOperator.ExpressionEvaluator} implementation for {@link Sub}. + * This class is generated. Do not edit it. + */ +public final class SubDatetimesEvaluator implements EvalOperator.ExpressionEvaluator { + private final Warnings warnings; + + private final EvalOperator.ExpressionEvaluator datetime; + + private final TemporalAmount temporalAmount; + + public SubDatetimesEvaluator(Source source, EvalOperator.ExpressionEvaluator datetime, + TemporalAmount temporalAmount) { + this.warnings = new Warnings(source); + this.datetime = datetime; + this.temporalAmount = temporalAmount; + } + + @Override + public Block eval(Page page) { + Block datetimeUncastBlock = datetime.eval(page); + if (datetimeUncastBlock.areAllValuesNull()) { + return Block.constantNullBlock(page.getPositionCount()); + } + LongBlock datetimeBlock = (LongBlock) datetimeUncastBlock; + LongVector datetimeVector = datetimeBlock.asVector(); + if (datetimeVector == null) { + return eval(page.getPositionCount(), datetimeBlock); + } + return eval(page.getPositionCount(), datetimeVector); + } + + public LongBlock eval(int positionCount, LongBlock datetimeBlock) { + LongBlock.Builder result = LongBlock.newBlockBuilder(positionCount); + position: for (int p = 0; p < positionCount; p++) { + if (datetimeBlock.isNull(p) || datetimeBlock.getValueCount(p) != 1) { + result.appendNull(); + continue position; + } + try { + result.appendLong(Sub.processDatetimes(datetimeBlock.getLong(datetimeBlock.getFirstValueIndex(p)), temporalAmount)); + } catch (ArithmeticException | DateTimeException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + + public LongBlock eval(int positionCount, LongVector datetimeVector) { + LongBlock.Builder result = LongBlock.newBlockBuilder(positionCount); + position: for (int p = 0; p < positionCount; p++) { + try { + result.appendLong(Sub.processDatetimes(datetimeVector.getLong(p), temporalAmount)); + } catch (ArithmeticException | DateTimeException e) { + warnings.registerException(e); + result.appendNull(); + } + } + return result.build(); + } + + @Override + public String toString() { + return 
"SubDatetimesEvaluator[" + "datetime=" + datetime + ", temporalAmount=" + temporalAmount + "]"; + } +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/EsqlUnsupportedOperationException.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/EsqlUnsupportedOperationException.java deleted file mode 100644 index b5f8ea0b7f260..0000000000000 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/EsqlUnsupportedOperationException.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the Elastic License - * 2.0; you may not use this file except in compliance with the Elastic License - * 2.0. - */ - -package org.elasticsearch.xpack.esql; - -import org.elasticsearch.xpack.ql.QlServerException; -import org.elasticsearch.xpack.ql.type.DataType; - -public class EsqlUnsupportedOperationException extends QlServerException { - public EsqlUnsupportedOperationException(String message, Throwable cause, boolean enableSuppression, boolean writableStackTrace) { - super(message, cause, enableSuppression, writableStackTrace); - } - - public EsqlUnsupportedOperationException(String message, Throwable cause) { - super(message, cause); - } - - public EsqlUnsupportedOperationException(String message, Object... args) { - super(message, args); - } - - public EsqlUnsupportedOperationException(Throwable cause, String message, Object... args) { - super(cause, message, args); - } - - public EsqlUnsupportedOperationException(String message) { - super(message); - } - - public EsqlUnsupportedOperationException(Throwable cause) { - super(cause); - } - - public static EsqlUnsupportedOperationException methodNotImplemented() { - return new EsqlUnsupportedOperationException("method not implemented"); - } - - public static EsqlUnsupportedOperationException unsupportedDataType(DataType dataType) { - return EsqlUnsupportedOperationException.unsupportedDataType(dataType.typeName()); - } - - public static EsqlUnsupportedOperationException unsupportedDataType(String dataTypeName) { - return new EsqlUnsupportedOperationException("unsupported data type [" + dataTypeName + "]"); - } -} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java index 812b22a9857dc..5196cbb0dfd1c 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequest.java @@ -38,6 +38,8 @@ import java.util.function.Supplier; import static org.elasticsearch.action.ValidateActions.addValidationError; +import static org.elasticsearch.common.xcontent.XContentParserUtils.parseFieldsValue; +import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.xcontent.ObjectParser.ValueType.VALUE_ARRAY; public class EsqlQueryRequest extends ActionRequest implements CompositeIndicesRequest { @@ -47,6 +49,13 @@ public class EsqlQueryRequest extends ActionRequest implements CompositeIndicesR true, objects -> new TypedParamValue((String) objects[1], objects[0]) ); + private static final ParseField VALUE = new ParseField("value"); + private static final ParseField TYPE = new ParseField("type"); + + static { + PARAM_PARSER.declareField(constructorArg(), (p, c) -> parseFieldsValue(p), VALUE, 
ObjectParser.ValueType.VALUE); + PARAM_PARSER.declareString(constructorArg(), TYPE); + } private static final ParseField QUERY_FIELD = new ParseField("query"); private static final ParseField COLUMNAR_FIELD = new ParseField("columnar"); @@ -141,9 +150,7 @@ public void params(List params) { } public static EsqlQueryRequest fromXContent(XContentParser parser) { - EsqlQueryRequest result = PARSER.apply(parser, null); - validateParams(result.params); - return result; + return PARSER.apply(parser, null); } private static ObjectParser objectParser(Supplier supplier) { @@ -171,7 +178,7 @@ private static List parseParams(XContentParser p) throws IOExce Object value = null; String type = null; TypedParamValue previousParam = null; - TypedParamValue currentParam = null; + TypedParamValue currentParam; while ((token = p.nextToken()) != XContentParser.Token.END_ARRAY) { XContentLocation loc = p.getTokenLocation(); @@ -200,9 +207,6 @@ private static List parseParams(XContentParser p) throws IOExce } else if (numberType == XContentParser.NumberType.LONG) { value = p.longValue(); type = "long"; - } else if (numberType == XContentParser.NumberType.FLOAT) { - value = p.floatValue(); - type = "float"; } else if (numberType == XContentParser.NumberType.DOUBLE) { value = p.doubleValue(); type = "double"; @@ -244,17 +248,6 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, return new CancellableTask(id, type, action, query, parentTaskId, headers); } - protected static void validateParams(List params) { - for (TypedParamValue param : params) { - if (param.hasExplicitType()) { - throw new XContentParseException( - fromProto(param.tokenLocation()), - "[params] must be an array where each entry is a single field (no " + "objects supported)" - ); - } - } - } - static org.elasticsearch.xcontent.XContentLocation fromProto(ContentLocation fromProto) { if (fromProto == null) { return null; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java index bcdd2697ad5ce..774208480c6ff 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlQueryResponse.java @@ -29,7 +29,7 @@ import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentParser; -import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.versionfield.Version; @@ -80,15 +80,15 @@ public EsqlQueryResponse(List columns, List> values) { public EsqlQueryResponse(StreamInput in) throws IOException { super(in); - this.columns = in.readList(ColumnInfo::new); - this.pages = in.readList(Page::new); + this.columns = in.readCollectionAsList(ColumnInfo::new); + this.pages = in.readCollectionAsList(Page::new); this.columnar = in.readBoolean(); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(columns); - out.writeList(pages); + out.writeCollection(columns); + out.writeCollection(pages); out.writeBoolean(columnar); } @@ -100,7 +100,7 @@ List pages() { return pages; } - public List> values() { + public Iterator> values() { return 
pagesToValues(columns.stream().map(ColumnInfo::type).toList(), pages); } @@ -175,12 +175,14 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; EsqlQueryResponse that = (EsqlQueryResponse) o; - return Objects.equals(columns, that.columns) && Objects.equals(values(), that.values()) && columnar == that.columnar; + return Objects.equals(columns, that.columns) + && columnar == that.columnar + && Iterators.equals(values(), that.values(), (row1, row2) -> Iterators.equals(row1, row2, Objects::equals)); } @Override public int hashCode() { - return Objects.hash(columns, values(), columnar); + return Objects.hash(columns, Iterators.hashCode(values(), row -> Iterators.hashCode(row, Objects::hashCode)), columnar); } @Override @@ -188,40 +190,33 @@ public String toString() { return Strings.toString(ChunkedToXContent.wrapAsToXContent(this)); } - public static List> pagesToValues(List dataTypes, List pages) { + public static Iterator> pagesToValues(List dataTypes, List pages) { BytesRef scratch = new BytesRef(); - List> result = new ArrayList<>(); - for (Page page : pages) { - for (int p = 0; p < page.getPositionCount(); p++) { - List row = new ArrayList<>(page.getBlockCount()); - for (int b = 0; b < page.getBlockCount(); b++) { - Block block = page.getBlock(b); - if (block.isNull(p)) { - row.add(null); - continue; - } - /* - * Use the ESQL data type to map to the output to make sure compute engine - * respects its types. See the INTEGER clause where is doesn't always - * respect it. - */ - int count = block.getValueCount(p); - int start = block.getFirstValueIndex(p); - if (count == 1) { - row.add(valueAt(dataTypes.get(b), block, start, scratch)); - continue; - } - List thisResult = new ArrayList<>(count); - int end = count + start; - for (int i = start; i < end; i++) { - thisResult.add(valueAt(dataTypes.get(b), block, i, scratch)); - } - row.add(thisResult); + return Iterators.flatMap( + pages.iterator(), + page -> Iterators.forRange(0, page.getPositionCount(), p -> Iterators.forRange(0, page.getBlockCount(), b -> { + Block block = page.getBlock(b); + if (block.isNull(p)) { + return null; } - result.add(row); - } - } - return result; + /* + * Use the ESQL data type to map to the output to make sure compute engine + * respects its types. See the INTEGER clause where it doesn't always + * respect it.
+ */ + int count = block.getValueCount(p); + int start = block.getFirstValueIndex(p); + if (count == 1) { + return valueAt(dataTypes.get(b), block, start, scratch); + } + List thisResult = new ArrayList<>(count); + int end = count + start; + for (int i = start; i < end; i++) { + thisResult.add(valueAt(dataTypes.get(b), block, i, scratch)); + } + return thisResult; + })) + ); } private static Object valueAt(String dataType, Block block, int offset, BytesRef scratch) { @@ -242,7 +237,7 @@ private static Object valueAt(String dataType, Block block, int offset, BytesRef case "boolean" -> ((BooleanBlock) block).getBoolean(offset); case "version" -> new Version(((BytesRefBlock) block).getBytesRef(offset, scratch)).toString(); case "unsupported" -> UnsupportedValueSource.UNSUPPORTED_OUTPUT; - default -> throw EsqlUnsupportedOperationException.unsupportedDataType(dataType); + default -> throw EsqlIllegalArgumentException.illegalDataType(dataType); }; } @@ -275,7 +270,7 @@ private static Page valuesToPage(List dataTypes, List> valu case "boolean" -> ((BooleanBlock.Builder) builder).appendBoolean(((Boolean) value)); case "null" -> builder.appendNull(); case "version" -> ((BytesRefBlock.Builder) builder).appendBytesRef(new Version(value.toString()).toBytesRef()); - default -> throw EsqlUnsupportedOperationException.unsupportedDataType(dataTypes.get(c)); + default -> throw EsqlIllegalArgumentException.illegalDataType(dataTypes.get(c)); } } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java index a448f6ffc7200..0e0e9a4599780 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/EsqlResponseListener.java @@ -7,6 +7,10 @@ package org.elasticsearch.xpack.esql.action; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.rest.ChunkedRestResponseBody; import org.elasticsearch.rest.RestChannel; import org.elasticsearch.rest.RestRequest; @@ -18,23 +22,80 @@ import org.elasticsearch.xpack.esql.plugin.EsqlMediaTypeParser; import java.util.Locale; +import java.util.concurrent.TimeUnit; import static org.elasticsearch.xpack.esql.formatter.TextFormat.CSV; import static org.elasticsearch.xpack.esql.formatter.TextFormat.URL_PARAM_DELIMITER; +/** + * Listens for a single {@link EsqlQueryResponse}, builds a corresponding {@link RestResponse} and sends it. + */ public class EsqlResponseListener extends RestResponseListener { + /** + * A simple, thread-safe stop watch for timing a single action. + * Allows stopping the timer for building a response and logging the elapsed time at a later point. + */ + private static class ThreadSafeStopWatch { + /** + * Start time of the watch + */ + private final long startTimeNS = System.nanoTime(); + /** + * End time of the watch + */ + private long endTimeNS; + + /** + * Is the stop watch currently running? + */ + private boolean running = true; + + /** + * Starts the {@link ThreadSafeStopWatch} immediately upon construction. + */ + ThreadSafeStopWatch() {} + + /** + * Stop the stop watch (or do nothing if it was already stopped) and return the elapsed time since starting.
+ * @return the elapsed time since starting the watch + */ + public TimeValue stop() { + synchronized (this) { + if (running) { + endTimeNS = System.nanoTime(); + running = false; + } + + return new TimeValue(endTimeNS - startTimeNS, TimeUnit.NANOSECONDS); + } + } + } + + private static final Logger LOGGER = LogManager.getLogger(EsqlResponseListener.class); + private static final String HEADER_NAME_TOOK_NANOS = "Took-nanos"; private final RestChannel channel; private final RestRequest restRequest; private final MediaType mediaType; - private final long startNanos = System.nanoTime(); - private static final String HEADER_NAME_TOOK_NANOS = "Took-nanos"; + /** + * Keep the initial query for logging purposes. + */ + private final String esqlQuery; + /** + * Times how long it takes to build a response so it can be logged later. Use something thread-safe here because stopping the watch + * requires state and {@link EsqlResponseListener} might be used from different threads. + */ + private final ThreadSafeStopWatch stopWatch = new ThreadSafeStopWatch(); + /** + * To correctly time the execution of a request, an {@link EsqlResponseListener} must be constructed immediately before execution begins. + */ public EsqlResponseListener(RestChannel channel, RestRequest restRequest, EsqlQueryRequest esqlRequest) { super(channel); this.channel = channel; this.restRequest = restRequest; + this.esqlQuery = esqlRequest.query(); mediaType = EsqlMediaTypeParser.getResponseMediaType(restRequest, esqlRequest); /* @@ -69,8 +130,25 @@ public RestResponse buildResponse(EsqlQueryResponse esqlResponse) throws Excepti ChunkedRestResponseBody.fromXContent(esqlResponse, channel.request(), channel) ); } - restResponse.addHeader(HEADER_NAME_TOOK_NANOS, Long.toString(System.nanoTime() - startNanos)); + long tookNanos = stopWatch.stop().getNanos(); + restResponse.addHeader(HEADER_NAME_TOOK_NANOS, Long.toString(tookNanos)); return restResponse; } + + /** + * Log the execution time and query when handling an ES|QL response. + */ + public ActionListener wrapWithLogging() { + return ActionListener.wrap(r -> { + onResponse(r); + // At this point, the StopWatch should already have been stopped, so we log a consistent time. + LOGGER.info("Successfully executed ESQL query in {}ms:\n{}", stopWatch.stop().getMillis(), esqlQuery); + }, ex -> { + // In case of failure, stop the watch manually before sending out the response.
+ long timeMillis = stopWatch.stop().getMillis(); + onFailure(ex); + LOGGER.info("Failed executing ESQL query in {}ms:\n{}", timeMillis, esqlQuery); + }); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java index ba173bb3bcd34..15841a00b36de 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/action/RestEsqlQueryAction.java @@ -47,7 +47,11 @@ protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient cli return channel -> { RestCancellableNodeClient cancellableClient = new RestCancellableNodeClient(client, request.getHttpChannel()); - cancellableClient.execute(EsqlQueryAction.INSTANCE, esqlRequest, new EsqlResponseListener(channel, request, esqlRequest)); + cancellableClient.execute( + EsqlQueryAction.INSTANCE, + esqlRequest, + new EsqlResponseListener(channel, request, esqlRequest).wrapWithLogging() + ); }; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java index c0830931bb140..4c79b2453f8e3 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/analysis/Verifier.java @@ -7,6 +7,8 @@ package org.elasticsearch.xpack.esql.analysis; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Neg; import org.elasticsearch.xpack.esql.plan.logical.Dissect; @@ -29,8 +31,6 @@ import org.elasticsearch.xpack.ql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.ql.expression.predicate.BinaryOperator; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.ql.plan.logical.Aggregate; import org.elasticsearch.xpack.ql.plan.logical.Filter; import org.elasticsearch.xpack.ql.plan.logical.Limit; @@ -271,6 +271,9 @@ public static Failure validateBinaryComparison(BinaryComparison bc) { if (false == r.resolved()) { return fail(bc, r.message()); } + if (DataTypes.isString(bc.left().dataType()) && DataTypes.isString(bc.right().dataType())) { + return null; + } if (bc.left().dataType() != bc.right().dataType()) { return fail( bc, diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java index e07314d7e95ff..8aff85d088d6f 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichLookupService.java @@ -52,7 +52,6 @@ import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; 
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException; import org.elasticsearch.xpack.esql.action.EsqlQueryAction; import org.elasticsearch.xpack.esql.io.stream.PlanNameRegistry; import org.elasticsearch.xpack.esql.io.stream.PlanStreamInput; @@ -177,7 +176,7 @@ private void doLookup( QueryList queryList = QueryList.termQueryList(fieldType, searchExecutionContext, inputBlock); yield new EnrichQuerySourceOperator(queryList, searchExecutionContext.getIndexReader()); } - default -> throw new EsqlUnsupportedOperationException("unsupported match type " + matchType); + default -> throw new EsqlIllegalArgumentException("illegal match type " + matchType); }; List intermediateOperators = new ArrayList<>(extractFields.size() + 2); final ElementType[] mergingTypes = new ElementType[extractFields.size()]; @@ -293,7 +292,7 @@ private static class LookupRequest extends TransportRequest implements IndicesRe this.matchField = in.readString(); this.inputPage = new Page(in); PlanStreamInput planIn = new PlanStreamInput(in, PlanNameRegistry.INSTANCE, in.namedWriteableRegistry(), null); - this.extractFields = planIn.readList(readerFromPlanReader(PlanStreamInput::readNamedExpression)); + this.extractFields = planIn.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readNamedExpression)); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index 69234ffc7834e..a2b1d914d1435 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -39,6 +39,7 @@ public void resolvePolicy(String policyName, ActionListener new EnrichPolicyResolution(policyName, policy, indexResult)) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/QueryList.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/QueryList.java index cbcb946385e64..3ad6bace3dca9 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/QueryList.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/QueryList.java @@ -19,7 +19,6 @@ import org.elasticsearch.index.mapper.MappedFieldType; import org.elasticsearch.index.query.SearchExecutionContext; import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; -import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException; import java.util.ArrayList; import java.util.List; @@ -98,7 +97,7 @@ private static IntFunction blockToJavaObject(Block block) { yield longBlock::getLong; } case NULL -> offset -> null; - case DOC -> throw new EsqlUnsupportedOperationException("can't read values from [doc] block"); + case DOC -> throw new EsqlIllegalArgumentException("can't read values from [doc] block"); case UNKNOWN -> throw new EsqlIllegalArgumentException("can't read values from [" + block + "]"); }; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java index 8556fc54dcb82..2f8f09b9ff02b 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/EvalMapper.java @@ -161,7 +161,7 @@ public Block eval(Page page) { return 
page.getBlock(channel); } } - int channel = layout.getChannel(attr.id()); + int channel = layout.get(attr.id()).channel(); return () -> new Attribute(channel); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java index e9f457ea05daa..592a9f3d99499 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/ComparisonMapper.java @@ -9,19 +9,13 @@ import org.elasticsearch.common.TriFunction; import org.elasticsearch.compute.operator.EvalOperator; -import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException; +import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException; import org.elasticsearch.xpack.esql.evaluator.mapper.ExpressionMapper; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cast; import org.elasticsearch.xpack.esql.planner.Layout; import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry; import org.elasticsearch.xpack.ql.expression.predicate.BinaryOperator; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThan; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThanOrEqual; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; @@ -112,7 +106,7 @@ private ComparisonMapper( this.longs = longs; this.doubles = doubles; this.keywords = keywords; - this.bools = (lhs, rhs) -> { throw EsqlUnsupportedOperationException.unsupportedDataType(DataTypes.BOOLEAN); }; + this.bools = (lhs, rhs) -> { throw EsqlIllegalArgumentException.illegalDataType(DataTypes.BOOLEAN); }; } @Override @@ -145,7 +139,7 @@ public final Supplier map(BinaryComparison bc, if (leftType == DataTypes.DATETIME) { return () -> longs.apply(leftEval.get(), rightEval.get()); } - throw new EsqlUnsupportedOperationException("resolved type for [" + bc + "] but didn't implement mapping"); + throw new EsqlIllegalArgumentException("resolved type for [" + bc + "] but didn't implement mapping"); } public static Supplier castToEvaluator( diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/Equals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/Equals.java index 81c9c765a4dc3..db3822f047573 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/Equals.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/Equals.java @@ -8,8 +8,51 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.ann.Evaluator; +import 
org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.TypeResolutions; +import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; + +import java.time.ZoneId; + +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT; + +public class Equals extends org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.Equals { + public Equals(Source source, Expression left, Expression right) { + super(source, left, right); + } + + public Equals(Source source, Expression left, Expression right, ZoneId zoneId) { + super(source, left, right, zoneId); + } + + @Override + protected TypeResolution resolveInputType(Expression e, TypeResolutions.ParamOrdinal paramOrdinal) { + return EsqlTypeResolutions.isExact(e, sourceText(), DEFAULT); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, Equals::new, left(), right(), zoneId()); + } + + @Override + protected Equals replaceChildren(Expression newLeft, Expression newRight) { + return new Equals(source(), newLeft, newRight, zoneId()); + } + + @Override + public Equals swapLeftAndRight() { + return new Equals(source(), right(), left(), zoneId()); + } + + @Override + public BinaryComparison negate() { + return new NotEquals(source(), left(), right(), zoneId()); + } -public class Equals { @Evaluator(extraName = "Ints") static boolean processInts(int lhs, int rhs) { return lhs == rhs; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThan.java index 65e8442e91249..5683a9d0d7e85 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThan.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThan.java @@ -8,8 +8,52 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.ann.Evaluator; +import org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.TypeResolutions; +import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; + +import java.time.ZoneId; + +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT; + +public class GreaterThan extends org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThan { + public GreaterThan(Source source, Expression left, Expression right, ZoneId zoneId) { + super(source, left, right, zoneId); + } + + @Override + protected TypeResolution resolveInputType(Expression e, TypeResolutions.ParamOrdinal paramOrdinal) { + return EsqlTypeResolutions.isExact(e, sourceText(), DEFAULT); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, GreaterThan::new, left(), right(), zoneId()); + } + + @Override + protected GreaterThan replaceChildren(Expression newLeft, Expression newRight) { + return new GreaterThan(source(), newLeft, newRight, zoneId()); + } + + @Override + public LessThan 
swapLeftAndRight() { + return new LessThan(source(), right(), left(), zoneId()); + } + + @Override + public LessThanOrEqual negate() { + return new LessThanOrEqual(source(), left(), right(), zoneId()); + } + + @Override + public BinaryComparison reverse() { + return new LessThan(source(), left(), right(), zoneId()); + } -public class GreaterThan { @Evaluator(extraName = "Ints") static boolean processInts(int lhs, int rhs) { return lhs > rhs; diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqual.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqual.java index b0404c0429e1c..ebb29998fb995 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqual.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/GreaterThanOrEqual.java @@ -8,8 +8,52 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.ann.Evaluator; +import org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.TypeResolutions; +import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; -public class GreaterThanOrEqual { +import java.time.ZoneId; + +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT; + +public class GreaterThanOrEqual extends org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThanOrEqual { + + public GreaterThanOrEqual(Source source, Expression left, Expression right, ZoneId zoneId) { + super(source, left, right, zoneId); + } + + @Override + protected TypeResolution resolveInputType(Expression e, TypeResolutions.ParamOrdinal paramOrdinal) { + return EsqlTypeResolutions.isExact(e, sourceText(), DEFAULT); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, GreaterThanOrEqual::new, left(), right(), zoneId()); + } + + @Override + protected GreaterThanOrEqual replaceChildren(Expression newLeft, Expression newRight) { + return new GreaterThanOrEqual(source(), newLeft, newRight, zoneId()); + } + + @Override + public LessThanOrEqual swapLeftAndRight() { + return new LessThanOrEqual(source(), right(), left(), zoneId()); + } + + @Override + public LessThan negate() { + return new LessThan(source(), left(), right(), zoneId()); + } + + @Override + public BinaryComparison reverse() { + return new LessThanOrEqual(source(), left(), right(), zoneId()); + } @Evaluator(extraName = "Ints") static boolean processInts(int lhs, int rhs) { diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java index a6d6b305f6afe..7ed08b658c75e 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/InMapper.java @@ -18,7 +18,6 @@ import org.elasticsearch.xpack.esql.evaluator.mapper.ExpressionMapper; import 
org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In;
 import org.elasticsearch.xpack.esql.planner.Layout;
-import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.Equals;
 
 import java.util.ArrayList;
 import java.util.BitSet;
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThan.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThan.java
index 9ab461eecbb82..12f54270b65dc 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThan.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThan.java
@@ -8,8 +8,56 @@
 
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.compute.ann.Evaluator;
+import org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.expression.TypeResolutions;
+import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison;
+import org.elasticsearch.xpack.ql.tree.NodeInfo;
+import org.elasticsearch.xpack.ql.tree.Source;
+
+import java.time.ZoneId;
+
+import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT;
+
+public class LessThan extends org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThan {
+
+    public LessThan(Source source, Expression left, Expression right, ZoneId zoneId) {
+        super(source, left, right, zoneId);
+    }
+
+    @Override
+    protected TypeResolution resolveInputType(Expression e, TypeResolutions.ParamOrdinal paramOrdinal) {
+        return EsqlTypeResolutions.isExact(e, sourceText(), DEFAULT);
+    }
+
+    @Override
+    protected NodeInfo<LessThan> info() {
+        return NodeInfo.create(this, LessThan::new, left(), right(), zoneId());
+    }
+
+    @Override
+    protected org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThan replaceChildren(
+        Expression newLeft,
+        Expression newRight
+    ) {
+        return new LessThan(source(), newLeft, newRight, zoneId());
+    }
+
+    @Override
+    public GreaterThan swapLeftAndRight() {
+        return new GreaterThan(source(), right(), left(), zoneId());
+    }
+
+    @Override
+    public GreaterThanOrEqual negate() {
+        return new GreaterThanOrEqual(source(), left(), right(), zoneId());
+    }
+
+    @Override
+    public BinaryComparison reverse() {
+        return new GreaterThan(source(), left(), right(), zoneId());
+    }
-public class LessThan {
 
     @Evaluator(extraName = "Ints")
     static boolean processInts(int lhs, int rhs) {
         return lhs < rhs;
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqual.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqual.java
index d484b24536082..e75733a9e2340 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqual.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/LessThanOrEqual.java
@@ -8,8 +8,52 @@
 
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.compute.ann.Evaluator;
+import org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.expression.TypeResolutions;
+import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison;
+import org.elasticsearch.xpack.ql.tree.NodeInfo;
+import org.elasticsearch.xpack.ql.tree.Source;
+
+import java.time.ZoneId;
+
+import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT;
+
+public class LessThanOrEqual extends org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThanOrEqual {
+    public LessThanOrEqual(Source source, Expression left, Expression right, ZoneId zoneId) {
+        super(source, left, right, zoneId);
+    }
+
+    @Override
+    protected TypeResolution resolveInputType(Expression e, TypeResolutions.ParamOrdinal paramOrdinal) {
+        return EsqlTypeResolutions.isExact(e, sourceText(), DEFAULT);
+    }
+
+    @Override
+    protected NodeInfo<LessThanOrEqual> info() {
+        return NodeInfo.create(this, LessThanOrEqual::new, left(), right(), zoneId());
+    }
+
+    @Override
+    protected LessThanOrEqual replaceChildren(Expression newLeft, Expression newRight) {
+        return new LessThanOrEqual(source(), newLeft, newRight, zoneId());
+    }
+
+    @Override
+    public GreaterThanOrEqual swapLeftAndRight() {
+        return new GreaterThanOrEqual(source(), right(), left(), zoneId());
+    }
+
+    @Override
+    public GreaterThan negate() {
+        return new GreaterThan(source(), left(), right(), zoneId());
+    }
+
+    @Override
+    public BinaryComparison reverse() {
+        return new GreaterThanOrEqual(source(), left(), right(), zoneId());
+    }
-public class LessThanOrEqual {
 
     @Evaluator(extraName = "Ints")
     static boolean processInts(int lhs, int rhs) {
         return lhs <= rhs;
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEquals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEquals.java
index 98fb4c67782d9..67319bab11b19 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEquals.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/comparison/NotEquals.java
@@ -8,8 +8,47 @@
 
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.compute.ann.Evaluator;
+import org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.expression.TypeResolutions;
+import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison;
+import org.elasticsearch.xpack.ql.tree.NodeInfo;
+import org.elasticsearch.xpack.ql.tree.Source;
+
+import java.time.ZoneId;
+
+import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT;
+
+public class NotEquals extends org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.NotEquals {
+    public NotEquals(Source source, Expression left, Expression right, ZoneId zoneId) {
+        super(source, left, right, zoneId);
+    }
+
+    @Override
+    protected TypeResolution resolveInputType(Expression e, TypeResolutions.ParamOrdinal paramOrdinal) {
+        return EsqlTypeResolutions.isExact(e, sourceText(), DEFAULT);
+    }
+
+    @Override
+    protected NodeInfo<NotEquals> info() {
+        return NodeInfo.create(this, NotEquals::new, left(), right(), zoneId());
+    }
+
+    @Override
+    protected NotEquals replaceChildren(Expression newLeft, Expression newRight) {
+        return new NotEquals(source(), newLeft, newRight, zoneId());
+    }
+
+    @Override
+    public NotEquals swapLeftAndRight() {
+        return new NotEquals(source(), right(), left(), zoneId());
+    }
+
+    @Override
+    public BinaryComparison negate() {
+        return new Equals(source(), left(), right(), zoneId());
+    }
-public class NotEquals {
 
     @Evaluator(extraName = "Ints")
     static boolean processInts(int lhs, int rhs) {
         return lhs != rhs;
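The three subclasses above restate standard comparison algebra: swapLeftAndRight() exchanges the operands while mirroring the operator, negate() returns the logical complement, and reverse() mirrors the operator with operands kept in place. A standalone sanity check of those identities, in plain Java with illustrative class and method names (not part of the patch):

// Exhaustive check over a small domain of the identities the overrides encode.
public class ComparisonIdentities {
    static void check(boolean condition, String identity) {
        if (!condition) throw new AssertionError(identity);
    }

    public static void main(String[] args) {
        for (int a = -2; a <= 2; a++) {
            for (int b = -2; b <= 2; b++) {
                // LessThan.negate() is GreaterThanOrEqual
                check((a < b) == !(a >= b), "NOT(a < b) is a >= b");
                // LessThan.swapLeftAndRight() is GreaterThan with flipped operands
                check((a < b) == (b > a), "a < b is b > a");
                // LessThanOrEqual.negate() is GreaterThan
                check((a <= b) == !(a > b), "NOT(a <= b) is a > b");
                // NotEquals.negate() is Equals
                check((a != b) == !(a == b), "NOT(a != b) is a == b");
            }
        }
        System.out.println("all comparison identities hold");
    }
}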
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/regex/RLike.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/regex/RLike.java
new file mode 100644
index 0000000000000..510c9b7098926
--- /dev/null
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/regex/RLike.java
@@ -0,0 +1,41 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.evaluator.predicate.operator.regex;
+
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.expression.predicate.regex.RLikePattern;
+import org.elasticsearch.xpack.ql.tree.NodeInfo;
+import org.elasticsearch.xpack.ql.tree.Source;
+
+import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT;
+import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isString;
+
+public class RLike extends org.elasticsearch.xpack.ql.expression.predicate.regex.RLike {
+    public RLike(Source source, Expression value, RLikePattern pattern) {
+        super(source, value, pattern);
+    }
+
+    public RLike(Source source, Expression field, RLikePattern rLikePattern, boolean caseInsensitive) {
+        super(source, field, rLikePattern, caseInsensitive);
+    }
+
+    @Override
+    protected NodeInfo<RLike> info() {
+        return NodeInfo.create(this, RLike::new, field(), pattern(), caseInsensitive());
+    }
+
+    @Override
+    protected RLike replaceChild(Expression newChild) {
+        return new RLike(source(), newChild, pattern(), caseInsensitive());
+    }
+
+    @Override
+    protected TypeResolution resolveType() {
+        return isString(field(), sourceText(), DEFAULT);
+    }
+}
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/regex/WildcardLike.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/regex/WildcardLike.java
new file mode 100644
index 0000000000000..f7039bb95d1b2
--- /dev/null
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/evaluator/predicate/operator/regex/WildcardLike.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.evaluator.predicate.operator.regex;
+
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.expression.predicate.regex.WildcardPattern;
+import org.elasticsearch.xpack.ql.tree.NodeInfo;
+import org.elasticsearch.xpack.ql.tree.Source;
+
+import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT;
+import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isString;
+
+public class WildcardLike extends org.elasticsearch.xpack.ql.expression.predicate.regex.WildcardLike {
+    public WildcardLike(Source source, Expression left, WildcardPattern pattern) {
+        super(source, left, pattern, false);
+    }
+
+    @Override
+    protected NodeInfo<WildcardLike> info() {
+        return NodeInfo.create(this, WildcardLike::new, field(), pattern());
+    }
+
+    @Override
+    protected WildcardLike replaceChild(Expression newLeft) {
+        return new WildcardLike(source(), newLeft, pattern());
+    }
+
+    @Override
+    protected TypeResolution resolveType() {
+        return isString(field(), sourceText(), DEFAULT);
+    }
+}
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/EsqlTypeResolutions.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/EsqlTypeResolutions.java
new file mode 100644
index 0000000000000..d47ccf11c9985
--- /dev/null
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/EsqlTypeResolutions.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression;
+
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.expression.FieldAttribute;
+import org.elasticsearch.xpack.ql.expression.TypeResolutions;
+import org.elasticsearch.xpack.ql.type.DataTypes;
+import org.elasticsearch.xpack.ql.type.EsField;
+
+import java.util.Locale;
+
+import static org.elasticsearch.common.logging.LoggerMessageFormat.format;
+import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT;
+
+public class EsqlTypeResolutions {
+
+    public static Expression.TypeResolution isExact(Expression e, String operationName, TypeResolutions.ParamOrdinal paramOrd) {
+        if (e instanceof FieldAttribute fa) {
+            if (DataTypes.isString(fa.dataType())) {
+                // ESQL can extract exact values for TEXT fields
+                return Expression.TypeResolution.TYPE_RESOLVED;
+            }
+            EsField.Exact exact = fa.getExactInfo();
+            if (exact.hasExact() == false) {
+                return new Expression.TypeResolution(
+                    format(
+                        null,
+                        "[{}] cannot operate on {}field of data type [{}]: {}",
+                        operationName,
+                        paramOrd == null || paramOrd == DEFAULT ? "" : paramOrd.name().toLowerCase(Locale.ROOT) + " argument ",
+                        e.dataType().typeName(),
+                        exact.errorMsg()
+                    )
+                );
+            }
+        }
+        return Expression.TypeResolution.TYPE_RESOLVED;
+    }
+}
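EsqlTypeResolutions.isExact short-circuits for string types because ESQL can read exact values for text fields; every other field type only resolves if it can report an "exact" variant. A toy model of that decision, self-contained with stand-in record types rather than the real ql Expression/FieldAttribute classes:

import java.util.Optional;

// Simplified model of the isExact rule: strings always resolve,
// other field types resolve only when exact-info says they can.
public class IsExactSketch {
    record ExactInfo(boolean hasExact, String errorMsg) {}
    record Field(String name, String dataType, ExactInfo exact) {}

    // Empty result means "resolved"; a present value is the error message.
    static Optional<String> isExact(Field f, String operationName) {
        if (f.dataType().equals("keyword") || f.dataType().equals("text")) {
            return Optional.empty(); // strings pass unconditionally
        }
        if (f.exact().hasExact() == false) {
            return Optional.of("[" + operationName + "] cannot operate on field of data type ["
                + f.dataType() + "]: " + f.exact().errorMsg());
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        Field text = new Field("message", "text", new ExactInfo(false, "n/a"));
        System.out.println(isExact(text, "count_distinct")); // Optional.empty
    }
}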
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/Order.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/Order.java
new file mode 100644
index 0000000000000..95852a00ce2bb
--- /dev/null
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/Order.java
@@ -0,0 +1,40 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression;
+
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.tree.NodeInfo;
+import org.elasticsearch.xpack.ql.tree.Source;
+import org.elasticsearch.xpack.ql.type.DataTypes;
+
+import java.util.List;
+
+public class Order extends org.elasticsearch.xpack.ql.expression.Order {
+    public Order(Source source, Expression child, OrderDirection direction, NullsPosition nulls) {
+        super(source, child, direction, nulls);
+    }
+
+    @Override
+    protected TypeResolution resolveType() {
+        if (DataTypes.isString(child().dataType())) {
+            return TypeResolution.TYPE_RESOLVED;
+        }
+        return super.resolveType();
+    }
+
+    @Override
+    public Order replaceChildren(List<Expression> newChildren) {
+        return new Order(source(), newChildren.get(0), direction(), nullsPosition());
+    }
+
+    @Override
+    protected NodeInfo<Order> info() {
+        return NodeInfo.create(this, Order::new, child(), direction(), nullsPosition());
+    }
+
+}
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java
index 16367dde550ee..d4029fd8ac4f0 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/EsqlFunctionRegistry.java
@@ -71,8 +71,10 @@
 import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce;
 import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat;
 import org.elasticsearch.xpack.esql.expression.function.scalar.string.LTrim;
+import org.elasticsearch.xpack.esql.expression.function.scalar.string.Left;
 import org.elasticsearch.xpack.esql.expression.function.scalar.string.Length;
 import org.elasticsearch.xpack.esql.expression.function.scalar.string.RTrim;
+import org.elasticsearch.xpack.esql.expression.function.scalar.string.Right;
 import org.elasticsearch.xpack.esql.expression.function.scalar.string.Split;
 import org.elasticsearch.xpack.esql.expression.function.scalar.string.StartsWith;
 import org.elasticsearch.xpack.esql.expression.function.scalar.string.Substring;
@@ -141,6 +143,8 @@ private FunctionDefinition[][] functions() {
             def(LTrim.class, LTrim::new, "ltrim"),
             def(RTrim.class, RTrim::new, "rtrim"),
             def(Trim.class, Trim::new, "trim"),
+            def(Left.class, Left::new, "left"),
+            def(Right.class, Right::new, "right"),
             def(StartsWith.class, StartsWith::new, "starts_with") },
         // date
         new FunctionDefinition[] {
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java
index fce52374bfab0..959907ef93257 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/Count.java
@@ -10,6 +10,7 @@
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier;
 import org.elasticsearch.compute.aggregation.CountAggregatorFunction;
+import org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions;
 import org.elasticsearch.xpack.esql.planner.ToAggregator;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.expression.Nullability;
@@ -22,6 +23,8 @@
 
 import java.util.List;
 
+import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT;
+
 public class Count extends AggregateFunction implements EnclosedAgg, ToAggregator {
 
     public Count(Source source, Expression field) {
@@ -57,4 +60,9 @@ public AggregatorFunctionSupplier supplier(BigArrays bigArrays, List<Integer> inputChannels) {
     public Nullability nullable() {
         return Nullability.FALSE;
     }
+
+    @Override
+    protected TypeResolution resolveType() {
+        return EsqlTypeResolutions.isExact(field(), sourceText(), DEFAULT);
+    }
 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java
index 3e03dab21c6a3..044d89d41a0c5 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/CountDistinct.java
@@ -14,7 +14,8 @@
 import org.elasticsearch.compute.aggregation.CountDistinctDoubleAggregatorFunctionSupplier;
 import org.elasticsearch.compute.aggregation.CountDistinctIntAggregatorFunctionSupplier;
 import org.elasticsearch.compute.aggregation.CountDistinctLongAggregatorFunctionSupplier;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
+import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
+import org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions;
 import org.elasticsearch.xpack.esql.planner.ToAggregator;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.expression.function.OptionalArgument;
@@ -26,6 +27,7 @@
 
 import java.util.List;
 
+import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT;
 import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.SECOND;
 import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isInteger;
@@ -59,7 +61,7 @@ protected TypeResolution resolveType() {
             return new TypeResolution("Unresolved children");
         }
 
-        TypeResolution resolution = super.resolveType();
+        TypeResolution resolution = EsqlTypeResolutions.isExact(field(), sourceText(), DEFAULT);
         if (resolution.unresolved() || precision == null) {
             return resolution;
         }
@@ -87,6 +89,6 @@ public AggregatorFunctionSupplier supplier(BigArrays bigArrays, List<Integer> inputChannels) {
         if (type == DataTypes.KEYWORD || type == DataTypes.IP) {
             return new CountDistinctBytesRefAggregatorFunctionSupplier(bigArrays, inputChannels, precision);
         }
-        throw EsqlUnsupportedOperationException.unsupportedDataType(type);
+        throw EsqlIllegalArgumentException.illegalDataType(type);
     }
 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java
index f77ba8693dc8c..6d3ef52c8965a 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/aggregate/NumericAggregate.java
@@ -8,7 +8,7 @@
 
 import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.compute.aggregation.AggregatorFunctionSupplier;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
+import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
 import org.elasticsearch.xpack.esql.planner.ToAggregator;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.expression.TypeResolutions;
@@ -71,7 +71,7 @@ public final AggregatorFunctionSupplier supplier(BigArrays bigArrays, List<Integer> inputChannels) {
-        throw EsqlUnsupportedOperationException.unsupportedDataType(type);
+        throw EsqlIllegalArgumentException.illegalDataType(type);
     }
 
     protected abstract AggregatorFunctionSupplier longSupplier(BigArrays bigArrays, List<Integer> inputChannels);
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java
index 2c7298682c74c..f6636241caa72 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/UnaryScalarFunction.java
@@ -7,7 +7,6 @@
 
 package org.elasticsearch.xpack.esql.expression.function.scalar;
 
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.expression.TypeResolutions;
 import org.elasticsearch.xpack.ql.expression.function.scalar.ScalarFunction;
@@ -67,6 +66,6 @@
 
     @Override
     public final ScriptTemplate asScript() {
-        throw new EsqlUnsupportedOperationException("functions do not support scripting");
+        throw new UnsupportedOperationException("functions do not support scripting");
     }
 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java
index 948ca91e27565..88610bb989c35 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/Case.java
@@ -12,7 +12,6 @@
 import org.elasticsearch.compute.data.ElementType;
 import org.elasticsearch.compute.data.Page;
 import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner;
 import org.elasticsearch.xpack.ql.expression.Expression;
@@ -112,7 +111,7 @@ public Nullability nullable() {
 
     @Override
     public ScriptTemplate asScript() {
-        throw new EsqlUnsupportedOperationException("functions do not support scripting");
+        throw new UnsupportedOperationException("functions do not support scripting");
     }
 
     @Override
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java
index 1329de203528a..189fac27a0c73 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/convert/AbstractConvertFunction.java
@@ -13,7 +13,7 @@
 import org.elasticsearch.compute.data.Page;
 import org.elasticsearch.compute.data.Vector;
 import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
+import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.esql.expression.function.Warnings;
 import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction;
@@ -44,7 +44,7 @@ protected Supplier<EvalOperator.ExpressionEvaluator> evaluator(Supplier<EvalOperator.ExpressionEvaluator> fieldEval) {
         return () -> evaluator.apply(fieldEval.get(), source());
     }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/BinaryDateTimeFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/BinaryDateTimeFunction.java
index 6da3a8c23b9de..c7c923e8e912a 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/BinaryDateTimeFunction.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/BinaryDateTimeFunction.java
@@ -7,7 +7,6 @@
 
 package org.elasticsearch.xpack.esql.expression.function.scalar.date;
 
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.expression.function.scalar.BinaryScalarFunction;
 import org.elasticsearch.xpack.ql.expression.gen.script.ScriptTemplate;
@@ -45,7 +44,7 @@ public ZoneId zoneId() {
 
     @Override
     public ScriptTemplate asScript() {
-        throw new EsqlUnsupportedOperationException("functions do not support scripting");
+        throw new UnsupportedOperationException("functions do not support scripting");
     }
 
     @Override
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java
index ca3258c0c2189..3e9f84c2c67a1 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtract.java
@@ -12,7 +12,7 @@
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.compute.ann.Fixed;
 import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
+import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.expression.TypeResolutions;
@@ -52,7 +52,7 @@ public Supplier<EvalOperator.ExpressionEvaluator> toEvaluator(
         ChronoField chrono = chronoField();
         if (chrono == null) {
             BytesRef field = (BytesRef) children().get(1).fold();
-            throw new EsqlUnsupportedOperationException("invalid date field for [{}]: {}", sourceText(), field.utf8ToString());
+            throw new EsqlIllegalArgumentException("invalid date field for [{}]: {}", sourceText(), field.utf8ToString());
         }
         return () -> new DateExtractConstantEvaluator(fieldEvaluator.get(), chrono, configuration().zoneId());
     }
@@ -103,7 +103,7 @@ public DataType dataType() {
 
     @Override
     public ScriptTemplate asScript() {
-        throw new EsqlUnsupportedOperationException("functions do not support scripting");
+        throw new UnsupportedOperationException("functions do not support scripting");
    }
 
     @Override
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormat.java
index e4e809fe59a9f..cc1f649cff730 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormat.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateFormat.java
@@ -12,7 +12,6 @@
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.compute.ann.Fixed;
 import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.esql.session.EsqlConfiguration;
 import org.elasticsearch.xpack.ql.expression.Expression;
@@ -129,6 +128,6 @@ protected NodeInfo<DateFormat> info() {
 
     @Override
     public ScriptTemplate asScript() {
-        throw new EsqlUnsupportedOperationException("functions do not support scripting");
+        throw new UnsupportedOperationException("functions do not support scripting");
     }
 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java
index d943a4f66b685..5d69b8d7f2219 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParse.java
@@ -13,7 +13,6 @@
 import org.elasticsearch.compute.ann.Fixed;
 import org.elasticsearch.compute.operator.EvalOperator;
 import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.expression.function.OptionalArgument;
@@ -135,6 +134,6 @@ protected NodeInfo<DateParse> info() {
 
     @Override
     public ScriptTemplate asScript() {
-        throw new EsqlUnsupportedOperationException("functions do not support scripting");
+        throw new UnsupportedOperationException("functions do not support scripting");
     }
 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java
index 16a459c6e97df..e2c8dc5b6d85f 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateTrunc.java
@@ -15,11 +15,9 @@
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
 import org.elasticsearch.xpack.ql.expression.Expression;
-import org.elasticsearch.xpack.ql.expression.TypeResolutions;
 import org.elasticsearch.xpack.ql.expression.function.scalar.BinaryScalarFunction;
 import org.elasticsearch.xpack.ql.tree.NodeInfo;
 import org.elasticsearch.xpack.ql.tree.Source;
-import org.elasticsearch.xpack.ql.type.DataType;
 import org.elasticsearch.xpack.ql.type.DataTypes;
 
 import java.time.Duration;
@@ -30,6 +28,7 @@
 import java.util.function.Supplier;
 
 import static org.elasticsearch.common.logging.LoggerMessageFormat.format;
+import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount;
 import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.FIRST;
 import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.SECOND;
 import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isDate;
@@ -57,30 +56,17 @@ protected TypeResolution resolveType() {
             return resolution;
         }
 
-        return isInterval(interval(), sourceText(), SECOND);
+        return isType(interval(), EsqlDataTypes::isTemporalAmount, sourceText(), SECOND, "dateperiod", "timeduration");
     }
 
     // TODO: drop check once 8.11 is released
     private TypeResolution argumentTypesAreSwapped() {
-        DataType leftType = left().dataType();
-        DataType rightType = right().dataType();
-        if (leftType == DataTypes.DATETIME && (rightType == EsqlDataTypes.DATE_PERIOD || rightType == EsqlDataTypes.TIME_DURATION)) {
+        if (DataTypes.isDateTime(left().dataType()) && isTemporalAmount(right().dataType())) {
             return new TypeResolution(format(null, "function definition has been updated, please swap arguments in [{}]", sourceText()));
         }
         return TypeResolution.TYPE_RESOLVED;
     }
 
-    private static TypeResolution isInterval(Expression e, String operationName, TypeResolutions.ParamOrdinal paramOrd) {
-        return isType(
-            e,
-            dt -> dt == EsqlDataTypes.DATE_PERIOD || dt == EsqlDataTypes.TIME_DURATION,
-            operationName,
-            paramOrd,
-            "dateperiod",
-            "timeduration"
-        );
-    }
-
     @Override
     public Object fold() {
         return EvaluatorMapper.super.fold();
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/Now.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/Now.java
index 4e69f3259be1c..2695177c7456a 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/Now.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/Now.java
@@ -10,7 +10,6 @@
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.compute.ann.Fixed;
 import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.expression.function.scalar.ConfigurationFunction;
@@ -82,6 +81,6 @@ public Supplier<EvalOperator.ExpressionEvaluator> toEvaluator(
 
     @Override
     public ScriptTemplate asScript() {
-        throw new EsqlUnsupportedOperationException("functions do not support scripting");
+        throw new UnsupportedOperationException("functions do not support scripting");
     }
 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatch.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatch.java
index a012acde2174e..1eed641f72354 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatch.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/ip/CIDRMatch.java
@@ -11,7 +11,6 @@
 import org.elasticsearch.common.network.CIDRUtils;
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.expression.function.scalar.ScalarFunction;
@@ -106,7 +105,7 @@ protected TypeResolution resolveType() {
 
     @Override
     public ScriptTemplate asScript() {
-        throw new EsqlUnsupportedOperationException("functions do not support scripting");
+        throw new UnsupportedOperationException("functions do not support scripting");
     }
 
     @Override
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Abs.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Abs.java
index 9b975924b3c6a..0d2c2f812a0bf 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Abs.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Abs.java
@@ -9,7 +9,7 @@
 
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
+import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.esql.expression.function.Named;
 import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction;
@@ -64,7 +64,7 @@ public Supplier<EvalOperator.ExpressionEvaluator> toEvaluator(
         if (dataType() == DataTypes.INTEGER) {
             return () -> new AbsIntEvaluator(field.get());
         }
-        throw EsqlUnsupportedOperationException.unsupportedDataType(dataType());
+        throw EsqlIllegalArgumentException.illegalDataType(dataType());
     }
 
     @Override
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2.java
index 9c30c48b3c2ce..103c6f77f0ac2 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2.java
@@ -9,7 +9,6 @@
 
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.esql.expression.function.Named;
 import org.elasticsearch.xpack.ql.expression.Expression;
@@ -95,7 +94,7 @@ public Object fold() {
 
     @Override
     public ScriptTemplate asScript() {
-        throw new EsqlUnsupportedOperationException("functions do not support scripting");
+        throw new UnsupportedOperationException("functions do not support scripting");
     }
 
     public Expression y() {
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AutoBucket.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AutoBucket.java
index 20b70bd5ff3f5..321699b06a0f6 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AutoBucket.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AutoBucket.java
@@ -12,7 +12,7 @@
 import org.elasticsearch.compute.operator.EvalOperator;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.index.mapper.DateFieldMapper;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
+import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateTrunc;
 import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div;
@@ -122,7 +122,7 @@ public Supplier<EvalOperator.ExpressionEvaluator> toEvaluator(
             Mul mul = new Mul(source(), floor, rounding);
             return toEvaluator.apply(mul);
         }
-        throw EsqlUnsupportedOperationException.unsupportedDataType(field.dataType());
+        throw EsqlIllegalArgumentException.illegalDataType(field.dataType());
     }
 
     private record DateRoundingPicker(int buckets, long from, long to) {
@@ -214,7 +214,7 @@ public DataType dataType() {
 
     @Override
     public ScriptTemplate asScript() {
-        throw new EsqlUnsupportedOperationException("functions do not support scripting");
+        throw new UnsupportedOperationException("functions do not support scripting");
     }
 
     @Override
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cast.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cast.java
index 484e6d3b7b817..cfe8acc1641dc 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cast.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Cast.java
@@ -10,7 +10,7 @@
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.compute.data.Block;
 import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
+import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
 import org.elasticsearch.xpack.ql.QlIllegalArgumentException;
 import org.elasticsearch.xpack.ql.type.DataType;
 import org.elasticsearch.xpack.ql.type.DataTypes;
@@ -63,8 +63,8 @@ public static Supplier<EvalOperator.ExpressionEvaluator> cast(
         throw cantCast(current, required);
     }
 
-    private static EsqlUnsupportedOperationException cantCast(DataType current, DataType required) {
-        return new EsqlUnsupportedOperationException("can't process [" + current.typeName() + " -> " + required.typeName() + "]");
+    private static EsqlIllegalArgumentException cantCast(DataType current, DataType required) {
+        return new EsqlIllegalArgumentException("can't process [" + current.typeName() + " -> " + required.typeName() + "]");
     }
 
     @Evaluator(extraName = "IntToLong")
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/DoubleConstantFunction.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/DoubleConstantFunction.java
index ea79ba2e99308..474c1662fa03b 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/DoubleConstantFunction.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/DoubleConstantFunction.java
@@ -7,7 +7,6 @@
 
 package org.elasticsearch.xpack.esql.expression.function.scalar.math;
 
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.expression.function.scalar.ScalarFunction;
 import org.elasticsearch.xpack.ql.expression.gen.script.ScriptTemplate;
@@ -36,7 +35,7 @@ public final DataType dataType() {
 
     @Override
     public final ScriptTemplate asScript() {
-        throw new EsqlUnsupportedOperationException("functions do not support scripting");
+        throw new UnsupportedOperationException("functions do not support scripting");
     }
 
     @Override
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10.java
index 42d9cbbd41767..4541fb16580bf 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10.java
@@ -9,7 +9,7 @@
 
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
+import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.esql.expression.function.Named;
 import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction;
@@ -53,7 +53,7 @@ public Supplier<EvalOperator.ExpressionEvaluator> toEvaluator(
             return () -> new Log10UnsignedLongEvaluator(eval);
         }
 
-        throw EsqlUnsupportedOperationException.unsupportedDataType(fieldType);
+        throw EsqlIllegalArgumentException.illegalDataType(fieldType);
     }
 
     @Evaluator(extraName = "Double")
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Pow.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Pow.java
index 24879976f9b47..2408cf88c25ff 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Pow.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Pow.java
@@ -9,7 +9,6 @@
 
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.esql.expression.function.Named;
 import org.elasticsearch.xpack.ql.expression.Expression;
@@ -162,7 +161,7 @@ private static DataType determineDataType(Expression base, Expression exponent) {
 
     @Override
     public ScriptTemplate asScript() {
-        throw new EsqlUnsupportedOperationException("functions do not support scripting");
+        throw new UnsupportedOperationException("functions do not support scripting");
     }
 
     @Override
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Round.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Round.java
index bc0554dd2f36b..3ce830837cd5f 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Round.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Round.java
@@ -9,7 +9,7 @@
 
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
+import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.expression.function.OptionalArgument;
@@ -137,7 +137,7 @@ public DataType dataType() {
 
     @Override
     public ScriptTemplate asScript() {
-        throw new EsqlUnsupportedOperationException("functions do not support scripting");
+        throw new UnsupportedOperationException("functions do not support scripting");
     }
 
     @Override
@@ -157,7 +157,7 @@ public Supplier<EvalOperator.ExpressionEvaluator> toEvaluator(
         if (fieldType == DataTypes.UNSIGNED_LONG) {
             return toEvaluator(toEvaluator, Function.identity(), RoundUnsignedLongEvaluator::new);
         }
-        throw EsqlUnsupportedOperationException.unsupportedDataType(fieldType);
+        throw EsqlIllegalArgumentException.illegalDataType(fieldType);
     }
 
     private Supplier<EvalOperator.ExpressionEvaluator> toEvaluator(
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sqrt.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sqrt.java
index 2a18146a821ce..f20e2d81ca749 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sqrt.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Sqrt.java
@@ -9,7 +9,7 @@
 
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
+import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.esql.expression.function.Named;
 import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction;
@@ -53,7 +53,7 @@ public Supplier<EvalOperator.ExpressionEvaluator> toEvaluator(
             return () -> new SqrtUnsignedLongEvaluator(eval);
         }
 
-        throw EsqlUnsupportedOperationException.unsupportedDataType(fieldType);
+        throw EsqlIllegalArgumentException.illegalDataType(fieldType);
     }
 
     @Evaluator(extraName = "Double")
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvg.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvg.java
index 5f819e21b987c..8bfa48eefd9af 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvg.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvg.java
@@ -10,7 +10,7 @@
 import org.elasticsearch.compute.ann.MvEvaluator;
 import org.elasticsearch.compute.operator.EvalOperator;
 import org.elasticsearch.search.aggregations.metrics.CompensatedSum;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
+import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.NodeInfo;
@@ -52,7 +52,7 @@ protected Supplier<EvalOperator.ExpressionEvaluator> evaluator(Supplier<EvalOperator.ExpressionEvaluator> fieldEval) {
                 ? () -> new MvAvgUnsignedLongEvaluator(fieldEval.get())
                 : () -> new MvAvgLongEvaluator(fieldEval.get());
             case NULL -> () -> EvalOperator.CONSTANT_NULL;
-            default -> throw EsqlUnsupportedOperationException.unsupportedDataType(field().dataType());
+            default -> throw EsqlIllegalArgumentException.illegalDataType(field.dataType());
         };
     }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java
index 232a3baaea3cd..7aa9121dce321 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMax.java
@@ -10,7 +10,7 @@
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.compute.ann.MvEvaluator;
 import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
+import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
 import org.elasticsearch.xpack.ql.expression.Expression;
@@ -44,7 +44,7 @@ protected Supplier<EvalOperator.ExpressionEvaluator> evaluator(Supplier<EvalOperator.ExpressionEvaluator> fieldEval) {
             case INT -> () -> new MvMaxIntEvaluator(fieldEval.get());
             case LONG -> () -> new MvMaxLongEvaluator(fieldEval.get());
             case NULL -> () -> EvalOperator.CONSTANT_NULL;
-            default -> throw EsqlUnsupportedOperationException.unsupportedDataType(field().dataType());
+            default -> throw EsqlIllegalArgumentException.illegalDataType(field.dataType());
         };
     }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedian.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedian.java
index 7867676fd0a20..c2916a270d830 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedian.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedian.java
@@ -13,7 +13,7 @@
 import org.elasticsearch.compute.data.IntBlock;
 import org.elasticsearch.compute.data.LongBlock;
 import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
+import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.NodeInfo;
@@ -51,7 +51,7 @@ protected Supplier<EvalOperator.ExpressionEvaluator> evaluator(Supplier<EvalOperator.ExpressionEvaluator> fieldEval) {
             case LONG -> field().dataType() == DataTypes.UNSIGNED_LONG
                 ? () -> new MvMedianUnsignedLongEvaluator(fieldEval.get())
                 : () -> new MvMedianLongEvaluator(fieldEval.get());
-            default -> throw EsqlUnsupportedOperationException.unsupportedDataType(field().dataType());
+            default -> throw EsqlIllegalArgumentException.illegalDataType(field.dataType());
         };
     }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java
index 7dfe6be6915ef..c24db3d268494 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMin.java
@@ -10,7 +10,7 @@
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.compute.ann.MvEvaluator;
 import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
+import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
 import org.elasticsearch.xpack.ql.expression.Expression;
@@ -44,7 +44,7 @@ protected Supplier<EvalOperator.ExpressionEvaluator> evaluator(Supplier<EvalOperator.ExpressionEvaluator> fieldEval) {
             case INT -> () -> new MvMinIntEvaluator(fieldEval.get());
             case LONG -> () -> new MvMinLongEvaluator(fieldEval.get());
             case NULL -> () -> EvalOperator.CONSTANT_NULL;
-            default -> throw EsqlUnsupportedOperationException.unsupportedDataType(field().dataType());
+            default -> throw EsqlIllegalArgumentException.illegalDataType(field.dataType());
         };
     }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSum.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSum.java
index ac5ee6cce4d96..843453154b31c 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSum.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSum.java
@@ -10,7 +10,7 @@
 import org.elasticsearch.compute.ann.MvEvaluator;
 import org.elasticsearch.compute.operator.EvalOperator;
 import org.elasticsearch.search.aggregations.metrics.CompensatedSum;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
+import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.NodeInfo;
@@ -46,7 +46,8 @@ protected Supplier<EvalOperator.ExpressionEvaluator> evaluator(Supplier<EvalOperator.ExpressionEvaluator> fieldEval) {
                 ? () -> new MvSumUnsignedLongEvaluator(source(), fieldEval.get())
                 : () -> new MvSumLongEvaluator(source(), fieldEval.get());
             case NULL -> () -> EvalOperator.CONSTANT_NULL;
-            default -> throw EsqlUnsupportedOperationException.unsupportedDataType(field().dataType());
+
+            default -> throw EsqlIllegalArgumentException.illegalDataType(field.dataType());
         };
     }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java
index 8cd2915d35c25..7730080eaf3ae 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/Coalesce.java
@@ -11,7 +11,6 @@
 import org.elasticsearch.compute.data.ElementType;
 import org.elasticsearch.compute.data.Page;
 import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner;
 import org.elasticsearch.xpack.ql.expression.Expression;
@@ -94,7 +93,7 @@ public Nullability nullable() {
 
     @Override
     public ScriptTemplate asScript() {
-        throw new EsqlUnsupportedOperationException("functions do not support scripting");
+        throw new UnsupportedOperationException("functions do not support scripting");
     }
 
     @Override
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java
index ce28765b21b43..2387b2571c710 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/package-info.java
@@ -123,8 +123,8 @@
  * asciidoc ceremony to make the result look right in the rendered docs.
  * <li>
- * Auto-generate a syntax diagram and a table with supported types by running
- * {@code ./gradlew x-pack:plugin:esql:copyGeneratedDocs}
+ * Generate a syntax diagram and a table with supported types by running the tests via
+ * gradle: {@code ./gradlew x-pack:plugin:esql:test}
+ * The generated files can be found here
+ * {@code docs/reference/esql/functions/signature/myfunction.svg }
+ * and here
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java
index f6fdcfc6f7055..547dfb5b9376c 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Concat.java
@@ -12,7 +12,6 @@
 import org.elasticsearch.compute.ann.Evaluator;
 import org.elasticsearch.compute.ann.Fixed;
 import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
 import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.expression.Expressions;
@@ -104,6 +103,6 @@ protected NodeInfo<Concat> info() {
 
     @Override
     public ScriptTemplate asScript() {
-        throw new EsqlUnsupportedOperationException("functions do not support scripting");
+        throw new UnsupportedOperationException("functions do not support scripting");
     }
 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Left.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Left.java
new file mode 100644
index 0000000000000..121225765f5af
--- /dev/null
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Left.java
@@ -0,0 +1,133 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.string;
+
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.compute.ann.Evaluator;
+import org.elasticsearch.compute.ann.Fixed;
+import org.elasticsearch.compute.operator.EvalOperator;
+import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
+import org.elasticsearch.xpack.esql.expression.function.Named;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.expression.function.scalar.ScalarFunction;
+import org.elasticsearch.xpack.ql.expression.gen.script.ScriptTemplate;
+import org.elasticsearch.xpack.ql.tree.NodeInfo;
+import org.elasticsearch.xpack.ql.tree.Source;
+import org.elasticsearch.xpack.ql.type.DataType;
+import org.elasticsearch.xpack.ql.type.DataTypes;
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.function.Function;
+import java.util.function.Supplier;
+
+import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.FIRST;
+import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.SECOND;
+import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isInteger;
+import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isString;
+
+/**
+ * {@code left(foo, len)} is an alias to {@code substring(foo, 0, len)}
+ */
+public class Left extends ScalarFunction implements EvaluatorMapper {
+
+    private final Source source;
+
+    private final Expression str;
+
+    private final Expression length;
+
+    public Left(Source source, @Named("string") Expression str, @Named("length") Expression length) {
+        super(source, Arrays.asList(str, length));
+        this.source = source;
+        this.str = str;
+        this.length = length;
+    }
+
+    @Evaluator
+    static BytesRef process(
+        @Fixed(includeInToString = false) BytesRef out,
+        @Fixed(includeInToString = false) UnicodeUtil.UTF8CodePoint cp,
+        BytesRef str,
+        int length
+    ) {
+        out.bytes = str.bytes;
+        out.offset = str.offset;
+        out.length = str.length;
+        int curLenStart = 0;
+        for (int i = 0; i < length && curLenStart < out.length; i++, curLenStart += cp.numBytes) {
+            UnicodeUtil.codePointAt(out.bytes, out.offset + curLenStart, cp);
+        }
+        out.length = Math.min(curLenStart, out.length);
+        return out;
+    }
+
+    @Override
+    public Supplier<EvalOperator.ExpressionEvaluator> toEvaluator(
+        Function<Expression, Supplier<EvalOperator.ExpressionEvaluator>> toEvaluator
+    ) {
+
+        Supplier<EvalOperator.ExpressionEvaluator> strSupplier = toEvaluator.apply(str);
+        Supplier<EvalOperator.ExpressionEvaluator> lengthSupplier = toEvaluator.apply(length);
+        return () -> {
+            BytesRef out = new BytesRef();
+            UnicodeUtil.UTF8CodePoint cp = new UnicodeUtil.UTF8CodePoint();
+            return new LeftEvaluator(out, cp, strSupplier.get(), lengthSupplier.get());
+        };
+    }
+
+    @Override
+    public Expression replaceChildren(List<Expression> newChildren) {
+        return new Left(source(), newChildren.get(0), newChildren.get(1));
+    }
+
+    @Override
+    protected NodeInfo<Left> info() {
+        return NodeInfo.create(this, Left::new, str, length);
+    }
+
+    @Override
+    public DataType dataType() {
+        return DataTypes.KEYWORD;
+    }
+
+    @Override
+    protected TypeResolution resolveType() {
+        if (childrenResolved() == false) {
+            return new TypeResolution("Unresolved children");
+        }
+
+        TypeResolution resolution = isString(str, sourceText(), FIRST);
+        if (resolution.unresolved()) {
+            return resolution;
+        }
+
+        resolution = isInteger(length, sourceText(), SECOND);
+        if (resolution.unresolved()) {
+            return resolution;
+        }
+
+        return TypeResolution.TYPE_RESOLVED;
+    }
+
+    @Override
+    public boolean foldable() {
+        return str.foldable() && length.foldable();
+    }
+
+    @Override
+    public Object fold() {
+        return EvaluatorMapper.super.fold();
+    }
+
+    @Override
+    public ScriptTemplate asScript() {
+        throw new UnsupportedOperationException();
+    }
+}
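Left's evaluator slices by Unicode code points rather than raw bytes, so multi-byte UTF-8 characters are never cut in half; Right (next file) mirrors the same walk, skipping codeLen - length code points from the front instead. A small standalone sketch of the same byte-walk, assuming Lucene on the classpath; the LeftSketch class, the left helper, and the sample strings are illustrative, not part of the patch:

import java.nio.charset.StandardCharsets;

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.UnicodeUtil;

public class LeftSketch {
    // Walk at most `length` code points from the start of the UTF-8 bytes,
    // then cut the byte range there -- the same loop the evaluator runs per row.
    static String left(String s, int length) {
        BytesRef str = new BytesRef(s); // encodes the string as UTF-8
        UnicodeUtil.UTF8CodePoint cp = new UnicodeUtil.UTF8CodePoint();
        int end = 0;
        for (int i = 0; i < length && end < str.length; i++, end += cp.numBytes) {
            UnicodeUtil.codePointAt(str.bytes, str.offset + end, cp);
        }
        return new String(str.bytes, str.offset, Math.min(end, str.length), StandardCharsets.UTF_8);
    }

    public static void main(String[] args) {
        // 'ü' is two UTF-8 bytes but one code point, so it is never split
        System.out.println(left("München", 2));  // Mü
        System.out.println(left("München", 99)); // München (capped at the string length)
    }
}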
+        return out;
+    }
+
+    @Override
+    public Supplier<EvalOperator.ExpressionEvaluator> toEvaluator(
+        Function<Expression, Supplier<EvalOperator.ExpressionEvaluator>> toEvaluator
+    ) {
+
+        Supplier<EvalOperator.ExpressionEvaluator> strSupplier = toEvaluator.apply(str);
+        Supplier<EvalOperator.ExpressionEvaluator> lengthSupplier = toEvaluator.apply(length);
+        return () -> {
+            BytesRef out = new BytesRef();
+            UnicodeUtil.UTF8CodePoint cp = new UnicodeUtil.UTF8CodePoint();
+            return new RightEvaluator(out, cp, strSupplier.get(), lengthSupplier.get());
+        };
+    }
+
+    @Override
+    public Expression replaceChildren(List<Expression> newChildren) {
+        return new Right(source(), newChildren.get(0), newChildren.get(1));
+    }
+
+    @Override
+    protected NodeInfo<Right> info() {
+        return NodeInfo.create(this, Right::new, str, length);
+    }
+
+    @Override
+    public DataType dataType() {
+        return DataTypes.KEYWORD;
+    }
+
+    @Override
+    protected TypeResolution resolveType() {
+        if (childrenResolved() == false) {
+            return new TypeResolution("Unresolved children");
+        }
+
+        TypeResolution resolution = isString(str, sourceText(), FIRST);
+        if (resolution.unresolved()) {
+            return resolution;
+        }
+
+        resolution = isInteger(length, sourceText(), SECOND);
+        if (resolution.unresolved()) {
+            return resolution;
+        }
+
+        return TypeResolution.TYPE_RESOLVED;
+    }
+
+    @Override
+    public boolean foldable() {
+        return str.foldable() && length.foldable();
+    }
+
+    @Override
+    public Object fold() {
+        return EvaluatorMapper.super.fold();
+    }
+
+    @Override
+    public ScriptTemplate asScript() {
+        throw new UnsupportedOperationException();
+    }
+}
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWith.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWith.java
index 2a13c148b5a20..3018a95b7d45d 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWith.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWith.java
@@ -10,7 +10,6 @@
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.compute.ann.Evaluator;
import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
import org.elasticsearch.xpack.ql.expression.Expression;
import org.elasticsearch.xpack.ql.expression.function.scalar.ScalarFunction;
@@ -88,7 +87,7 @@ protected NodeInfo<StartsWith> info() {

    @Override
    public ScriptTemplate asScript() {
-        throw new EsqlUnsupportedOperationException("functions do not support scripting");
+        throw new UnsupportedOperationException("functions do not support scripting");
    }

    @Override
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Substring.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Substring.java
index 7034e83b3e9d9..cca891231fb73 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Substring.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/Substring.java
@@ -11,7 +11,6 @@
import org.apache.lucene.util.UnicodeUtil;
import org.elasticsearch.compute.ann.Evaluator;
import org.elasticsearch.compute.operator.EvalOperator;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
import org.elasticsearch.xpack.ql.expression.Expression;
import org.elasticsearch.xpack.ql.expression.function.OptionalArgument;
@@ -129,7 +128,7 @@ protected NodeInfo<Substring> info() {

    @Override
    public ScriptTemplate asScript() {
-        throw new EsqlUnsupportedOperationException("functions do not support scripting");
+        throw new UnsupportedOperationException("functions do not support scripting");
    }

    @Override
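For orientation between the string-function changes above and the arithmetic changes below: a minimal standalone sketch (not part of this PR; the class name is hypothetical) of the trailing-take semantics that Right.process implements, expressed with plain Java string handling instead of BytesRef slicing:

final class RightSemanticsSketch {
    // Take the last `length` code points; if `length` exceeds the code point count,
    // return the whole string -- the same skip rule as Right.process above.
    static String right(String str, int length) {
        int codePoints = str.codePointCount(0, str.length());
        int skip = Math.max(codePoints - length, 0);
        return str.substring(str.offsetByCodePoints(0, skip));
    }
}

So right("elastic", 2) yields "ic", while right("日本語", 5) yields the whole string unchanged.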
not support scripting"); + throw new UnsupportedOperationException("functions do not support scripting"); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java index 089eb6c848fff..0ab7050386fb0 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Add.java @@ -8,15 +8,21 @@ package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic; import org.elasticsearch.compute.ann.Evaluator; +import org.elasticsearch.compute.ann.Fixed; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.predicate.operator.arithmetic.BinaryComparisonInversible; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; +import java.time.DateTimeException; +import java.time.temporal.TemporalAmount; + import static org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation.OperationSymbol.ADD; +import static org.elasticsearch.xpack.ql.type.DateUtils.asDateTime; +import static org.elasticsearch.xpack.ql.type.DateUtils.asMillis; import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAddExact; -public class Add extends EsqlArithmeticOperation implements BinaryComparisonInversible { +public class Add extends DateTimeArithmeticOperation implements BinaryComparisonInversible { public Add(Source source, Expression left, Expression right) { super( @@ -27,7 +33,8 @@ public Add(Source source, Expression left, Expression right) { AddIntsEvaluator::new, AddLongsEvaluator::new, AddUnsignedLongsEvaluator::new, - (s, l, r) -> new AddDoublesEvaluator(l, r) + (s, l, r) -> new AddDoublesEvaluator(l, r), + AddDatetimesEvaluator::new ); } @@ -75,4 +82,10 @@ public static long processUnsignedLongs(long lhs, long rhs) { static double processDoubles(double lhs, double rhs) { return lhs + rhs; } + + @Evaluator(extraName = "Datetimes", warnExceptions = { ArithmeticException.class, DateTimeException.class }) + static long processDatetimes(long datetime, @Fixed TemporalAmount temporalAmount) { + // using a UTC conversion since `datetime` is always a UTC-Epoch timestamp, either read from ES or converted through a function + return asMillis(asDateTime(datetime).plus(temporalAmount)); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java new file mode 100644 index 0000000000000..01e0af5add780 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DateTimeArithmeticOperation.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */
+
+package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic;
+
+import org.elasticsearch.common.TriFunction;
+import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator;
+import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.tree.Source;
+import org.elasticsearch.xpack.ql.type.DataType;
+import org.elasticsearch.xpack.ql.type.DataTypes;
+
+import java.time.temporal.TemporalAmount;
+import java.util.function.Function;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+
+import static org.elasticsearch.common.logging.LoggerMessageFormat.format;
+import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isDateTimeOrTemporal;
+
+abstract class DateTimeArithmeticOperation extends EsqlArithmeticOperation {
+
+    interface DatetimeArithmeticEvaluator extends TriFunction<Source, ExpressionEvaluator, TemporalAmount, ExpressionEvaluator> {};
+
+    private final DatetimeArithmeticEvaluator datetimes;
+
+    DateTimeArithmeticOperation(
+        Source source,
+        Expression left,
+        Expression right,
+        OperationSymbol op,
+        ArithmeticEvaluator ints,
+        ArithmeticEvaluator longs,
+        ArithmeticEvaluator ulongs,
+        ArithmeticEvaluator doubles,
+        DatetimeArithmeticEvaluator datetimes
+    ) {
+        super(source, left, right, op, ints, longs, ulongs, doubles);
+        this.datetimes = datetimes;
+    }
+
+    @Override
+    protected TypeResolution resolveType() {
+        DataType leftType = left().dataType();
+        DataType rightType = right().dataType();
+        // date math is only possible if one argument is a DATETIME and the other a (foldable) TemporalValue
+        if (isDateTimeOrTemporal(leftType) || isDateTimeOrTemporal(rightType)) {
+            if (argumentOfType(DataTypes::isDateTime) == null || argumentOfType(EsqlDataTypes::isTemporalAmount) == null) {
+                return new TypeResolution(
+                    format(null, "[{}] has arguments with incompatible types [{}] and [{}]", symbol(), leftType, rightType)
+                );
+            }
+            return TypeResolution.TYPE_RESOLVED;
+        }
+        return super.resolveType();
+    }
+
+    @Override
+    public Supplier<ExpressionEvaluator> toEvaluator(Function<Expression, Supplier<ExpressionEvaluator>> toEvaluator) {
+        return dataType() == DataTypes.DATETIME
+            ? () -> datetimes.apply(
+                source(),
+                toEvaluator.apply(argumentOfType(DataTypes::isDateTime)).get(),
+                (TemporalAmount) argumentOfType(EsqlDataTypes::isTemporalAmount).fold()
+            )
+            : super.toEvaluator(toEvaluator);
+    }
+
+    private Expression argumentOfType(Predicate<DataType> filter) {
+        return filter.test(left().dataType()) ? left() : filter.test(right().dataType()) ? right() : null;
+    }
+}
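A minimal sketch (not part of the diff; the class and method names are hypothetical) of the date math the Add/Sub "Datetimes" evaluators delegate to, assuming UTC-epoch millisecond timestamps on both sides, mirroring asMillis(asDateTime(datetime).plus(temporalAmount)) from Add.processDatetimes:

import java.time.Instant;
import java.time.Period;
import java.time.ZoneOffset;
import java.time.temporal.TemporalAmount;

final class DateMathSketch {
    // Convert millis -> ZonedDateTime at UTC, apply the temporal amount, convert back to millis.
    static long plus(long epochMillis, TemporalAmount amount) {
        return Instant.ofEpochMilli(epochMillis).atZone(ZoneOffset.UTC).plus(amount).toInstant().toEpochMilli();
    }

    public static void main(String[] args) {
        // 1970-01-01T00:00:00Z + P1D -> 86400000 millis
        System.out.println(plus(0L, Period.ofDays(1)));
    }
}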
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java
index caa658be15636..b3c5ca390100e 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/EsqlArithmeticOperation.java
@@ -12,7 +12,6 @@
import org.elasticsearch.compute.operator.EvalOperator;
import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator;
import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
import org.elasticsearch.xpack.esql.expression.function.scalar.math.Cast;
import org.elasticsearch.xpack.esql.type.EsqlDataTypeRegistry;
@@ -23,6 +22,7 @@
import org.elasticsearch.xpack.ql.type.DataType;

import java.io.IOException;
+import java.util.function.Function;
import java.util.function.Supplier;

import static org.elasticsearch.xpack.ql.type.DataTypes.DOUBLE;
@@ -52,17 +52,17 @@ enum OperationSymbol implements BinaryArithmeticOperation {

        @Override
        public String getWriteableName() {
-            throw EsqlUnsupportedOperationException.methodNotImplemented();
+            throw new UnsupportedOperationException();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
-            throw EsqlUnsupportedOperationException.methodNotImplemented();
+            throw new UnsupportedOperationException();
        }

        @Override
        public Object doApply(Object o, Object o2) {
-            throw EsqlUnsupportedOperationException.methodNotImplemented();
+            throw new UnsupportedOperationException();
        }

        @Override
@@ -110,9 +110,7 @@ public DataType dataType() {
    }

    @Override
-    public final Supplier<EvalOperator.ExpressionEvaluator> toEvaluator(
-        java.util.function.Function<Expression, Supplier<EvalOperator.ExpressionEvaluator>> toEvaluator
-    ) {
+    public Supplier<EvalOperator.ExpressionEvaluator> toEvaluator(Function<Expression, Supplier<EvalOperator.ExpressionEvaluator>> toEvaluator) {
        var commonType = dataType();
        var leftType = left().dataType();
        if (leftType.isNumeric()) {
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Neg.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Neg.java
index 5a758f7f722a8..27a3bf362f199 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Neg.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Neg.java
@@ -11,24 +11,32 @@
import org.elasticsearch.compute.operator.EvalOperator.ExpressionEvaluator;
import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
import org.elasticsearch.xpack.esql.evaluator.mapper.EvaluatorMapper;
+import org.elasticsearch.xpack.esql.expression.function.Warnings;
import org.elasticsearch.xpack.esql.expression.function.scalar.UnaryScalarFunction;
import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.expression.Literal;
import org.elasticsearch.xpack.ql.tree.NodeInfo;
import org.elasticsearch.xpack.ql.tree.Source;
import org.elasticsearch.xpack.ql.type.DataType;
import org.elasticsearch.xpack.ql.type.DataTypes;

+import java.time.Duration;
+import java.time.Period;
import java.util.List;
import
java.util.function.Function; import java.util.function.Supplier; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount; import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT; -import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isNumeric; +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.isType; public class Neg extends UnaryScalarFunction implements EvaluatorMapper { + private final Warnings warnings; + public Neg(Source source, Expression field) { super(source, field); + warnings = new Warnings(source); } @Override @@ -52,18 +60,50 @@ else if (type == DataTypes.LONG) { if (supplier != null) { return supplier; } + } else if (isTemporalAmount(type)) { + return toEvaluator.apply(field()); } throw new EsqlIllegalArgumentException("arithmetic negation operator with unsupported data type [" + type + "]"); } @Override public final Object fold() { + if (isTemporalAmount(field().dataType()) && field() instanceof Literal literal) { + return foldTemporalAmount(literal); + } return EvaluatorMapper.super.fold(); } + private Object foldTemporalAmount(Literal literal) { + try { + Object value = literal.fold(); + if (value instanceof Period period) { + return period.negated(); + } + if (value instanceof Duration duration) { + return duration.negated(); + } + } catch (ArithmeticException ae) { + warnings.registerException(ae); + return null; + } + + throw new EsqlIllegalArgumentException( + "unexpected non-temporal amount literal [" + literal.sourceText() + "] of type [" + literal.dataType() + "]" + ); + } + @Override protected TypeResolution resolveType() { - return isNumeric(field(), sourceText(), DEFAULT); + return isType( + field(), + dt -> dt.isNumeric() || isTemporalAmount(dt), + sourceText(), + DEFAULT, + "numeric", + "date_period", + "time_duration" + ); } @Override diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java index 60fbd73cab6d3..a114795b07275 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/Sub.java @@ -8,15 +8,24 @@ package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic; import org.elasticsearch.compute.ann.Evaluator; +import org.elasticsearch.compute.ann.Fixed; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.predicate.operator.arithmetic.BinaryComparisonInversible; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataTypes; +import java.time.DateTimeException; +import java.time.temporal.TemporalAmount; + +import static org.elasticsearch.common.logging.LoggerMessageFormat.format; import static org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.EsqlArithmeticOperation.OperationSymbol.SUB; +import static org.elasticsearch.xpack.ql.type.DateUtils.asDateTime; +import static org.elasticsearch.xpack.ql.type.DateUtils.asMillis; import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongSubtractExact; -public class Sub extends EsqlArithmeticOperation implements BinaryComparisonInversible 
{ +public class Sub extends DateTimeArithmeticOperation implements BinaryComparisonInversible { public Sub(Source source, Expression left, Expression right) { super( @@ -27,10 +36,30 @@ public Sub(Source source, Expression left, Expression right) { SubIntsEvaluator::new, SubLongsEvaluator::new, SubUnsignedLongsEvaluator::new, - (s, l, r) -> new SubDoublesEvaluator(l, r) + (s, l, r) -> new SubDoublesEvaluator(l, r), + SubDatetimesEvaluator::new ); } + @Override + protected TypeResolution resolveType() { + TypeResolution resolution = super.resolveType(); + if (resolution.resolved() && EsqlDataTypes.isDateTimeOrTemporal(dataType()) && DataTypes.isDateTime(left().dataType()) == false) { + return new TypeResolution( + format( + null, + "[{}] arguments are in unsupported order: cannot subtract a [{}] value [{}] from a [{}] amount [{}]", + symbol(), + right().dataType(), + right().sourceText(), + left().dataType(), + left().sourceText() + ) + ); + } + return resolution; + } + @Override protected NodeInfo info() { return NodeInfo.create(this, Sub::new, left(), right()); @@ -65,4 +94,10 @@ static long processUnsignedLongs(long lhs, long rhs) { static double processDoubles(double lhs, double rhs) { return lhs - rhs; } + + @Evaluator(extraName = "Datetimes", warnExceptions = { ArithmeticException.class, DateTimeException.class }) + static long processDatetimes(long datetime, @Fixed TemporalAmount temporalAmount) { + // using a UTC conversion since `datetime` is always a UTC-Epoch timestamp, either read from ES or converted through a function + return asMillis(asDateTime(datetime).minus(temporalAmount)); + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NullEquals.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NullEquals.java new file mode 100644 index 0000000000000..4ab2d3fa8e7b9 --- /dev/null +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NullEquals.java @@ -0,0 +1,48 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; + +import org.elasticsearch.xpack.esql.expression.EsqlTypeResolutions; +import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.TypeResolutions; +import org.elasticsearch.xpack.ql.tree.NodeInfo; +import org.elasticsearch.xpack.ql.tree.Source; + +import java.time.ZoneId; + +import static org.elasticsearch.xpack.ql.expression.TypeResolutions.ParamOrdinal.DEFAULT; + +public class NullEquals extends org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.NullEquals { + public NullEquals(Source source, Expression left, Expression right, ZoneId zoneId) { + super(source, left, right, zoneId); + } + + @Override + protected TypeResolution resolveInputType(Expression e, TypeResolutions.ParamOrdinal paramOrdinal) { + return EsqlTypeResolutions.isExact(e, sourceText(), DEFAULT); + } + + @Override + protected NodeInfo info() { + return NodeInfo.create(this, NullEquals::new, left(), right(), zoneId()); + } + + @Override + protected org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.NullEquals replaceChildren( + Expression newLeft, + Expression newRight + ) { + return new NullEquals(source(), newLeft, newRight, zoneId()); + } + + @Override + public org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.NullEquals swapLeftAndRight() { + return new NullEquals(source(), right(), left(), zoneId()); + } + +} diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormat.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormat.java index 5e2b027105aa6..ac8f9560074f5 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormat.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormat.java @@ -11,7 +11,6 @@ import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.rest.RestRequest; import org.elasticsearch.xcontent.MediaType; -import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException; import org.elasticsearch.xpack.esql.action.ColumnInfo; import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; import org.elasticsearch.xpack.ql.util.StringUtils; @@ -55,12 +54,14 @@ String contentType() { @Override protected Character delimiter() { - throw new EsqlUnsupportedOperationException("plain text does not specify a delimiter character"); + assert false; + throw new UnsupportedOperationException("plain text does not specify a delimiter character"); } @Override protected String eol() { - throw new EsqlUnsupportedOperationException("plain text does not specify an end of line character"); + assert false; + throw new UnsupportedOperationException("plain text does not specify an end of line character"); } @Override @@ -74,6 +75,11 @@ public Set headerValues() { ); } + @Override + void writeEscaped(String value, Character delimiter, Writer writer) { + assert false; + throw new UnsupportedOperationException("plain text does not use writeEscaped()"); + } }, /** @@ -141,33 +147,27 @@ protected Character delimiter(RestRequest request) { } @Override - String maybeEscape(String value, Character delimiter) { - boolean needsEscaping = false; - + void writeEscaped(String value, Character delimiter, Writer writer) throws IOException { + int remainderStart = -1; // the index of the first character not copied to the output, or -1 if not escaping yet for (int i = 0; i < value.length(); i++) { char c = 
value.charAt(i); - if (c == '"' || c == '\n' || c == '\r' || c == delimiter) { - needsEscaping = true; - break; + if (remainderStart == -1 && (c == '"' || c == '\n' || c == '\r' || c == delimiter)) { + writer.write('"'); + remainderStart = 0; } - } - - if (needsEscaping) { - StringBuilder sb = new StringBuilder(); - - sb.append('"'); - for (int i = 0; i < value.length(); i++) { - char c = value.charAt(i); - if (value.charAt(i) == '"') { - sb.append('"'); - } - sb.append(c); + if (c == '"') { + writer.append(value, remainderStart, i + 1); + writer.write('"'); + remainderStart = i + 1; } - sb.append('"'); - value = sb.toString(); } - return value; + if (remainderStart == -1) { + writer.write(value); + } else { + writer.append(value, remainderStart, value.length()); + writer.write('"'); + } } @Override @@ -233,19 +233,24 @@ public String contentType(RestRequest request) { } @Override - String maybeEscape(String value, Character __) { - StringBuilder sb = new StringBuilder(); - + void writeEscaped(String value, Character delimiter, Writer writer) throws IOException { + int remainderStart = 0; // the index of the first character not copied to the output for (int i = 0; i < value.length(); i++) { char c = value.charAt(i); switch (c) { - case '\n' -> sb.append("\\n"); - case '\t' -> sb.append("\\t"); - default -> sb.append(c); + case '\n' -> { + writer.append(value, remainderStart, i); + writer.write("\\n"); + remainderStart = i + 1; + } + case '\t' -> { + writer.append(value, remainderStart, i); + writer.write("\\t"); + remainderStart = i + 1; + } } } - - return sb.toString(); + writer.append(value, remainderStart, value.length()); } @Override @@ -280,13 +285,12 @@ public Set headerValues() { public Iterator> format(RestRequest request, EsqlQueryResponse esqlResponse) { final var delimiter = delimiter(request); - return Iterators.concat(hasHeader(request) && esqlResponse.columns() != null ? - // if the header is requested return the info - Iterators.single(writer -> row(writer, esqlResponse.columns(), ColumnInfo::name, delimiter)) : Collections.emptyIterator(), - Iterators.map( - esqlResponse.values().iterator(), - row -> writer -> row(writer, row, f -> Objects.toString(f, StringUtils.EMPTY), delimiter) - ) + return Iterators.concat( + // if the header is requested return the info + hasHeader(request) && esqlResponse.columns() != null + ? Iterators.single(writer -> row(writer, esqlResponse.columns().iterator(), ColumnInfo::name, delimiter)) + : Collections.emptyIterator(), + Iterators.map(esqlResponse.values(), row -> writer -> row(writer, row, f -> Objects.toString(f, StringUtils.EMPTY), delimiter)) ); } @@ -309,12 +313,15 @@ public String contentType(RestRequest request) { } // utility method for consuming a row. - void row(Writer writer, List row, Function toString, Character delimiter) throws IOException { - for (int i = 0; i < row.size(); i++) { - writer.append(maybeEscape(toString.apply(row.get(i)), delimiter)); - if (i < row.size() - 1) { + void row(Writer writer, Iterator row, Function toString, Character delimiter) throws IOException { + boolean firstColumn = true; + while (row.hasNext()) { + if (firstColumn) { + firstColumn = false; + } else { writer.append(delimiter); } + writeEscaped(toString.apply(row.next()), delimiter, writer); } writer.append(eol()); } @@ -334,9 +341,7 @@ protected Character delimiter(RestRequest request) { protected abstract String eol(); /** - * Method used for escaping (if needed) a given value. 
+ * Write the given {@code value} to the {@code writer}, adding escaping if needed. */ - String maybeEscape(String value, Character delimiter) { - return value; - } + abstract void writeEscaped(String value, Character delimiter, Writer writer) throws IOException; } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormatter.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormatter.java index 8ba8ecfbfe8b8..0535e4adfe346 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormatter.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/formatter/TextFormatter.java @@ -46,10 +46,14 @@ public TextFormatter(EsqlQueryResponse response) { } // 2. Expand columns to fit the largest value - for (var row : response.values()) { + var iterator = response.values(); + while (iterator.hasNext()) { + var row = iterator.next(); for (int i = 0; i < width.length; i++) { - width[i] = Math.max(width[i], FORMATTER.apply(row.get(i)).length()); + assert row.hasNext(); + width[i] = Math.max(width[i], FORMATTER.apply(row.next()).length()); } + assert row.hasNext() == false; } } @@ -74,10 +78,10 @@ private void formatHeader(Writer writer) throws IOException { String name = response.columns().get(i).name(); // left padding int leftPadding = (width[i] - name.length()) / 2; - writer.append(" ".repeat(Math.max(0, leftPadding))); + writePadding(leftPadding, writer); writer.append(name); // right padding - writer.append(" ".repeat(Math.max(0, width[i] - name.length() - leftPadding))); + writePadding(width[i] - name.length() - leftPadding, writer); } writer.append('\n'); @@ -91,23 +95,37 @@ private void formatHeader(Writer writer) throws IOException { } private Iterator> formatResults() { - return Iterators.map(response.values().iterator(), row -> writer -> { + return Iterators.map(response.values(), row -> writer -> { for (int i = 0; i < width.length; i++) { + assert row.hasNext(); if (i > 0) { writer.append('|'); } - String string = FORMATTER.apply(row.get(i)); + String string = FORMATTER.apply(row.next()); if (string.length() <= width[i]) { // Pad writer.append(string); - writer.append(" ".repeat(Math.max(0, width[i] - string.length()))); + writePadding(width[i] - string.length(), writer); } else { // Trim writer.append(string, 0, width[i] - 1); writer.append('~'); } } + assert row.hasNext() == false; writer.append('\n'); }); } + + private static final String PADDING_64 = " ".repeat(64); + + private static void writePadding(int padding, Writer writer) throws IOException { + while (padding > PADDING_64.length()) { + writer.append(PADDING_64); + padding -= PADDING_64.length(); + } + if (padding > 0) { + writer.append(PADDING_64, 0, padding); + } + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNameRegistry.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNameRegistry.java index 5197abd5676f5..0c10a424d9603 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNameRegistry.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNameRegistry.java @@ -10,7 +10,6 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException; import java.io.IOException; import java.util.ArrayList; @@ 
-74,7 +73,7 @@ interface PlanNamedReader extends PlanReader { V read(PlanStreamInput in, String name) throws IOException; default V read(PlanStreamInput in) throws IOException { - throw new EsqlUnsupportedOperationException("should not reach here"); + throw new UnsupportedOperationException("should not reach here"); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java index 832e0227d6a7f..edc8bcacda9d4 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypes.java @@ -10,11 +10,18 @@ import org.elasticsearch.common.TriFunction; import org.elasticsearch.common.io.stream.NamedWriteable; import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.dissect.DissectParser; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.xpack.core.enrich.EnrichPolicy; import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.regex.RLike; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.regex.WildcardLike; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.expression.function.aggregate.Avg; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; @@ -82,8 +89,10 @@ import org.elasticsearch.xpack.esql.expression.function.scalar.nulls.Coalesce; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Concat; import org.elasticsearch.xpack.esql.expression.function.scalar.string.LTrim; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Left; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Length; import org.elasticsearch.xpack.esql.expression.function.scalar.string.RTrim; +import org.elasticsearch.xpack.esql.expression.function.scalar.string.Right; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Split; import org.elasticsearch.xpack.esql.expression.function.scalar.string.StartsWith; import org.elasticsearch.xpack.esql.expression.function.scalar.string.Substring; @@ -95,6 +104,7 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Neg; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Sub; import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NullEquals; import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Dissect.Parser; import org.elasticsearch.xpack.esql.plan.logical.Enrich; @@ -143,17 +153,8 @@ import 
org.elasticsearch.xpack.ql.expression.predicate.operator.arithmetic.ArithmeticOperation; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparisonProcessor; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThan; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThanOrEqual; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.NotEquals; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.NullEquals; -import org.elasticsearch.xpack.ql.expression.predicate.regex.RLike; import org.elasticsearch.xpack.ql.expression.predicate.regex.RLikePattern; import org.elasticsearch.xpack.ql.expression.predicate.regex.RegexMatch; -import org.elasticsearch.xpack.ql.expression.predicate.regex.WildcardLike; import org.elasticsearch.xpack.ql.expression.predicate.regex.WildcardPattern; import org.elasticsearch.xpack.ql.index.EsIndex; import org.elasticsearch.xpack.ql.index.IndexResolution; @@ -167,6 +168,7 @@ import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DateEsField; import org.elasticsearch.xpack.ql.type.EsField; +import org.elasticsearch.xpack.ql.type.InvalidMappedField; import org.elasticsearch.xpack.ql.type.KeywordEsField; import org.elasticsearch.xpack.ql.type.TextEsField; import org.elasticsearch.xpack.ql.type.UnsupportedEsField; @@ -265,6 +267,7 @@ public static List namedTypeEntries() { // EsFields of(EsField.class, EsField.class, PlanNamedTypes::writeEsField, PlanNamedTypes::readEsField), of(EsField.class, DateEsField.class, PlanNamedTypes::writeDateEsField, PlanNamedTypes::readDateEsField), + of(EsField.class, InvalidMappedField.class, PlanNamedTypes::writeInvalidMappedField, PlanNamedTypes::readInvalidMappedField), of(EsField.class, KeywordEsField.class, PlanNamedTypes::writeKeywordEsField, PlanNamedTypes::readKeywordEsField), of(EsField.class, TextEsField.class, PlanNamedTypes::writeTextEsField, PlanNamedTypes::readTextEsField), of(EsField.class, UnsupportedEsField.class, PlanNamedTypes::writeUnsupportedEsField, PlanNamedTypes::readUnsupportedEsField), @@ -343,6 +346,8 @@ public static List namedTypeEntries() { of(ScalarFunction.class, Pow.class, PlanNamedTypes::writePow, PlanNamedTypes::readPow), of(ScalarFunction.class, StartsWith.class, PlanNamedTypes::writeStartsWith, PlanNamedTypes::readStartsWith), of(ScalarFunction.class, Substring.class, PlanNamedTypes::writeSubstring, PlanNamedTypes::readSubstring), + of(ScalarFunction.class, Left.class, PlanNamedTypes::writeLeft, PlanNamedTypes::readLeft), + of(ScalarFunction.class, Right.class, PlanNamedTypes::writeRight, PlanNamedTypes::readRight), of(ScalarFunction.class, Split.class, PlanNamedTypes::writeSplit, PlanNamedTypes::readSplit), of(ScalarFunction.class, Tau.class, PlanNamedTypes::writeNoArgScalar, PlanNamedTypes::readNoArgScalar), // ArithmeticOperations @@ -381,7 +386,7 @@ static AggregateExec readAggregateExec(PlanStreamInput in) throws IOException { return new AggregateExec( Source.EMPTY, in.readPhysicalPlanNode(), - in.readList(readerFromPlanReader(PlanStreamInput::readExpression)), + 
in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression)), readNamedExpressions(in), in.readEnum(AggregateExec.Mode.class), in.readOptionalVInt() @@ -414,7 +419,7 @@ static EsQueryExec readEsQueryExec(PlanStreamInput in) throws IOException { readAttributes(in), in.readOptionalNamedWriteable(QueryBuilder.class), in.readOptionalNamed(Expression.class), - in.readOptionalList(readerFromPlanReader(PlanNamedTypes::readFieldSort)), + in.readOptionalCollectionAsList(readerFromPlanReader(PlanNamedTypes::readFieldSort)), in.readOptionalVInt() ); } @@ -566,7 +571,11 @@ static void writeMvExpandExec(PlanStreamOutput out, MvExpandExec mvExpandExec) t } static OrderExec readOrderExec(PlanStreamInput in) throws IOException { - return new OrderExec(Source.EMPTY, in.readPhysicalPlanNode(), in.readList(readerFromPlanReader(PlanNamedTypes::readOrder))); + return new OrderExec( + Source.EMPTY, + in.readPhysicalPlanNode(), + in.readCollectionAsList(readerFromPlanReader(PlanNamedTypes::readOrder)) + ); } static void writeOrderExec(PlanStreamOutput out, OrderExec orderExec) throws IOException { @@ -606,7 +615,7 @@ static TopNExec readTopNExec(PlanStreamInput in) throws IOException { return new TopNExec( Source.EMPTY, in.readPhysicalPlanNode(), - in.readList(readerFromPlanReader(PlanNamedTypes::readOrder)), + in.readCollectionAsList(readerFromPlanReader(PlanNamedTypes::readOrder)), in.readNamed(Expression.class), in.readOptionalVInt() ); @@ -624,7 +633,7 @@ static Aggregate readAggregate(PlanStreamInput in) throws IOException { return new Aggregate( Source.EMPTY, in.readLogicalPlanNode(), - in.readList(readerFromPlanReader(PlanStreamInput::readExpression)), + in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression)), readNamedExpressions(in) ); } @@ -722,7 +731,11 @@ static void writeLimit(PlanStreamOutput out, Limit limit) throws IOException { } static OrderBy readOrderBy(PlanStreamInput in) throws IOException { - return new OrderBy(Source.EMPTY, in.readLogicalPlanNode(), in.readList(readerFromPlanReader(PlanNamedTypes::readOrder))); + return new OrderBy( + Source.EMPTY, + in.readLogicalPlanNode(), + in.readCollectionAsList(readerFromPlanReader(PlanNamedTypes::readOrder)) + ); } static void writeOrderBy(PlanStreamOutput out, OrderBy order) throws IOException { @@ -743,7 +756,7 @@ static TopN readTopN(PlanStreamInput in) throws IOException { return new TopN( Source.EMPTY, in.readLogicalPlanNode(), - in.readList(readerFromPlanReader(PlanNamedTypes::readOrder)), + in.readCollectionAsList(readerFromPlanReader(PlanNamedTypes::readOrder)), in.readNamed(Expression.class) ); } @@ -759,7 +772,7 @@ static void writeTopN(PlanStreamOutput out, TopN topN) throws IOException { // private static List readAttributes(PlanStreamInput in) throws IOException { - return in.readList(readerFromPlanReader(PlanStreamInput::readAttribute)); + return in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readAttribute)); } static void writeAttributes(PlanStreamOutput out, List attributes) throws IOException { @@ -767,7 +780,7 @@ static void writeAttributes(PlanStreamOutput out, List attributes) th } private static List readNamedExpressions(PlanStreamInput in) throws IOException { - return in.readList(readerFromPlanReader(PlanStreamInput::readNamedExpression)); + return in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readNamedExpression)); } static void writeNamedExpressions(PlanStreamOutput out, List namedExpressions) throws IOException { @@ -775,7 +788,7 @@ static void 
writeNamedExpressions(PlanStreamOutput out, List readAliases(PlanStreamInput in) throws IOException { - return in.readList(readerFromPlanReader(PlanNamedTypes::readAlias)); + return in.readCollectionAsList(readerFromPlanReader(PlanNamedTypes::readAlias)); } static void writeAliases(PlanStreamOutput out, List aliases) throws IOException { @@ -883,7 +896,7 @@ static EsField readEsField(PlanStreamInput in) throws IOException { static void writeEsField(PlanStreamOutput out, EsField esField) throws IOException { out.writeString(esField.getName()); out.writeString(esField.getDataType().typeName()); - out.writeMap(esField.getProperties(), StreamOutput::writeString, (o, v) -> out.writeNamed(EsField.class, v)); + out.writeMap(esField.getProperties(), (o, v) -> out.writeNamed(EsField.class, v)); out.writeBoolean(esField.isAggregatable()); out.writeBoolean(esField.isAlias()); } @@ -898,10 +911,19 @@ static DateEsField readDateEsField(PlanStreamInput in) throws IOException { static void writeDateEsField(PlanStreamOutput out, DateEsField dateEsField) throws IOException { out.writeString(dateEsField.getName()); - out.writeMap(dateEsField.getProperties(), StreamOutput::writeString, (o, v) -> out.writeNamed(EsField.class, v)); + out.writeMap(dateEsField.getProperties(), (o, v) -> out.writeNamed(EsField.class, v)); out.writeBoolean(dateEsField.isAggregatable()); } + static InvalidMappedField readInvalidMappedField(PlanStreamInput in) throws IOException { + return new InvalidMappedField(in.readString(), in.readString()); + } + + static void writeInvalidMappedField(PlanStreamOutput out, InvalidMappedField field) throws IOException { + out.writeString(field.getName()); + out.writeString(field.errorMessage()); + } + static KeywordEsField readKeywordEsField(PlanStreamInput in) throws IOException { return new KeywordEsField( in.readString(), @@ -915,7 +937,7 @@ static KeywordEsField readKeywordEsField(PlanStreamInput in) throws IOException static void writeKeywordEsField(PlanStreamOutput out, KeywordEsField keywordEsField) throws IOException { out.writeString(keywordEsField.getName()); - out.writeMap(keywordEsField.getProperties(), StreamOutput::writeString, (o, v) -> out.writeNamed(EsField.class, v)); + out.writeMap(keywordEsField.getProperties(), (o, v) -> out.writeNamed(EsField.class, v)); out.writeBoolean(keywordEsField.isAggregatable()); out.writeInt(keywordEsField.getPrecision()); out.writeBoolean(keywordEsField.getNormalized()); @@ -933,7 +955,7 @@ static TextEsField readTextEsField(PlanStreamInput in) throws IOException { static void writeTextEsField(PlanStreamOutput out, TextEsField textEsField) throws IOException { out.writeString(textEsField.getName()); - out.writeMap(textEsField.getProperties(), StreamOutput::writeString, (o, v) -> out.writeNamed(EsField.class, v)); + out.writeMap(textEsField.getProperties(), (o, v) -> out.writeNamed(EsField.class, v)); out.writeBoolean(textEsField.isAggregatable()); out.writeBoolean(textEsField.isAlias()); } @@ -951,7 +973,7 @@ static void writeUnsupportedEsField(PlanStreamOutput out, UnsupportedEsField uns out.writeString(unsupportedEsField.getName()); out.writeString(unsupportedEsField.getOriginalType()); out.writeOptionalString(unsupportedEsField.getInherited()); - out.writeMap(unsupportedEsField.getProperties(), StreamOutput::writeString, (o, v) -> out.writeNamed(EsField.class, v)); + out.writeMap(unsupportedEsField.getProperties(), (o, v) -> out.writeNamed(EsField.class, v)); } // -- BinaryComparison @@ -982,7 +1004,7 @@ static void 
writeBinComparison(PlanStreamOutput out, BinaryComparison binaryComp

// -- InComparison

static In readInComparison(PlanStreamInput in) throws IOException {
-    return new In(Source.EMPTY, in.readExpression(), in.readList(readerFromPlanReader(PlanStreamInput::readExpression)));
+    return new In(Source.EMPTY, in.readExpression(), in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression)));
}

static void writeInComparison(PlanStreamOutput out, In in) throws IOException {
@@ -1149,7 +1171,7 @@ static void writeAutoBucket(PlanStreamOutput out, AutoBucket bucket) throws IOEx

static ScalarFunction readVarag(PlanStreamInput in, String name) throws IOException {
    return VARARG_CTORS.get(name)
-        .apply(Source.EMPTY, in.readExpression(), in.readList(readerFromPlanReader(PlanStreamInput::readExpression)));
+        .apply(Source.EMPTY, in.readExpression(), in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression)));
}

static void writeVararg(PlanStreamOutput out, ScalarFunction vararg) throws IOException {
@@ -1273,6 +1295,28 @@ static void writeSubstring(PlanStreamOutput out, Substring substring) throws IOE
    out.writeOptionalWriteable(fields.size() == 3 ? o -> out.writeExpression(fields.get(2)) : null);
}

+static Left readLeft(PlanStreamInput in) throws IOException {
+    return new Left(Source.EMPTY, in.readExpression(), in.readExpression());
+}
+
+static void writeLeft(PlanStreamOutput out, Left left) throws IOException {
+    List<Expression> fields = left.children();
+    assert fields.size() == 2;
+    out.writeExpression(fields.get(0));
+    out.writeExpression(fields.get(1));
+}
+
+static Right readRight(PlanStreamInput in) throws IOException {
+    return new Right(Source.EMPTY, in.readExpression(), in.readExpression());
+}
+
+static void writeRight(PlanStreamOutput out, Right right) throws IOException {
+    List<Expression> fields = right.children();
+    assert fields.size() == 2;
+    out.writeExpression(fields.get(0));
+    out.writeExpression(fields.get(1));
+}
+
static Split readSplit(PlanStreamInput in) throws IOException {
    return new Split(Source.EMPTY, in.readExpression(), in.readExpression());
}
@@ -1283,7 +1327,11 @@ static void writeSplit(PlanStreamOutput out, Split split) throws IOException {
}

static CIDRMatch readCIDRMatch(PlanStreamInput in) throws IOException {
-    return new CIDRMatch(Source.EMPTY, in.readExpression(), in.readList(readerFromPlanReader(PlanStreamInput::readExpression)));
+    return new CIDRMatch(
+        Source.EMPTY,
+        in.readExpression(),
+        in.readCollectionAsList(readerFromPlanReader(PlanStreamInput::readExpression))
+    );
}

static void writeCIDRMatch(PlanStreamOutput out, CIDRMatch cidrMatch) throws IOException {
@@ -1394,7 +1442,7 @@ static void writeLiteral(PlanStreamOutput out, Literal literal) throws IOExcepti
}

static Order readOrder(PlanStreamInput in) throws IOException {
-    return new Order(
+    return new org.elasticsearch.xpack.esql.expression.Order(
        Source.EMPTY,
        in.readNamed(Expression.class),
        in.readEnum(Order.OrderDirection.class),
@@ -1435,7 +1483,7 @@ static EsIndex readEsIndex(PlanStreamInput in) throws IOException {

static void writeEsIndex(PlanStreamOutput out, EsIndex esIndex) throws IOException {
    out.writeString(esIndex.name());
-    out.writeMap(esIndex.mapping(), StreamOutput::writeString, (o, v) -> out.writeNamed(EsField.class, v));
+    out.writeMap(esIndex.mapping(), (o, v) -> out.writeNamed(EsField.class, v));
    out.writeGenericValue(esIndex.concreteIndices());
}
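The Left/Right entries above follow the registry's symmetric reader/writer convention: fields must be read back in exactly the order they were written. A minimal standalone sketch of that contract (the Pair type is hypothetical; plain java.io streams stand in for PlanStreamInput/PlanStreamOutput):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

record Pair(String left, String right) {
    void writeTo(DataOutput out) throws IOException {
        out.writeUTF(left);   // written first ...
        out.writeUTF(right);
    }

    static Pair readFrom(DataInput in) throws IOException {
        return new Pair(in.readUTF(), in.readUTF()); // ... so it must be read first
    }

    public static void main(String[] args) throws IOException {
        var buffer = new ByteArrayOutputStream();
        new Pair("a", "b").writeTo(new DataOutputStream(buffer));
        System.out.println(readFrom(new DataInputStream(new ByteArrayInputStream(buffer.toByteArray()))));
    }
}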
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java
index 49173779406cc..8e38700a58225 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInput.java
@@ -28,8 +28,11 @@
import java.io.IOException;
import java.util.Collection;
+import java.util.HashMap;
import java.util.HashSet;
+import java.util.Map;
import java.util.function.LongFunction;
+import java.util.function.Supplier;

/**
 * A customized stream input used to deserialize ESQL physical plan fragments. Complements stream
@@ -37,7 +40,22 @@
 */
public final class PlanStreamInput extends NamedWriteableAwareStreamInput {

-    private static final LongFunction<NameId> DEFAULT_NAME_ID_FUNC = NameId::new;
+    /**
+     * Maps a stream name id, represented as a primitive long value, to a NameId instance.
+     * The no-args NameId constructor is used for absent entries, since it automatically selects
+     * and increments an id from the global counter, thus avoiding potential conflicts between
+     * the id in the stream and ids generated during local re-planning on the data node.
+     */
+    static final class NameIdMapper implements LongFunction<NameId> {
+        final Map<Long, NameId> seen = new HashMap<>();
+
+        @Override
+        public NameId apply(long streamNameId) {
+            return seen.computeIfAbsent(streamNameId, k -> new NameId());
+        }
+    }
+
+    private static final Supplier<LongFunction<NameId>> DEFAULT_NAME_ID_FUNC = NameIdMapper::new;

    private final PlanNameRegistry registry;

@@ -51,21 +69,11 @@ public PlanStreamInput(
        PlanNameRegistry registry,
        NamedWriteableRegistry namedWriteableRegistry,
        EsqlConfiguration configuration
-    ) {
-        this(streamInput, registry, namedWriteableRegistry, configuration, DEFAULT_NAME_ID_FUNC);
-    }
-
-    public PlanStreamInput(
-        StreamInput streamInput,
-        PlanNameRegistry registry,
-        NamedWriteableRegistry namedWriteableRegistry,
-        EsqlConfiguration configuration,
-        LongFunction<NameId> nameIdFunction
    ) {
        super(streamInput, namedWriteableRegistry);
        this.registry = registry;
-        this.nameIdFunction = nameIdFunction;
        this.configuration = configuration;
+        this.nameIdFunction = DEFAULT_NAME_ID_FUNC.get();
    }

    NameId nameIdFromLongValue(long value) {
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java
index 74e1a9e0cc180..8182b41df339f 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LocalPhysicalPlanOptimizer.java
@@ -9,6 +9,8 @@
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
+import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals;
+import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals;
import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In;
import org.elasticsearch.xpack.esql.optimizer.PhysicalOptimizerRules.OptimizerRule;
import org.elasticsearch.xpack.esql.plan.physical.AggregateExec;
@@ -37,8 +39,6 @@
import org.elasticsearch.xpack.ql.expression.predicate.logical.BinaryLogic;
import org.elasticsearch.xpack.ql.expression.predicate.logical.Not;
import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison;
-import
org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.ql.expression.predicate.regex.RegexMatch; import org.elasticsearch.xpack.ql.expression.predicate.regex.WildcardLike; import org.elasticsearch.xpack.ql.planner.ExpressionTranslator; @@ -46,6 +46,8 @@ import org.elasticsearch.xpack.ql.querydsl.query.Query; import org.elasticsearch.xpack.ql.rule.ParameterizedRuleExecutor; import org.elasticsearch.xpack.ql.rule.Rule; +import org.elasticsearch.xpack.ql.util.Queries; +import org.elasticsearch.xpack.ql.util.Queries.Clause; import java.util.ArrayList; import java.util.Collection; @@ -56,12 +58,11 @@ import java.util.function.Supplier; import static java.util.Arrays.asList; -import static org.elasticsearch.index.query.QueryBuilders.boolQuery; import static org.elasticsearch.xpack.ql.expression.predicate.Predicates.splitAnd; import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.TransformDirection.UP; public class LocalPhysicalPlanOptimizer extends ParameterizedRuleExecutor { - private static final QlTranslatorHandler TRANSLATOR_HANDLER = new EsqlTranslatorHandler(); + public static final QlTranslatorHandler TRANSLATOR_HANDLER = new EsqlTranslatorHandler(); private final PhysicalVerifier verifier = new PhysicalVerifier(); @@ -179,7 +180,7 @@ private static Set missingAttributes(PhysicalPlan p) { } } - private static class PushFiltersToSource extends OptimizerRule { + public static class PushFiltersToSource extends OptimizerRule { @Override protected PhysicalPlan rule(FilterExec filterExec) { PhysicalPlan plan = filterExec; @@ -191,11 +192,7 @@ protected PhysicalPlan rule(FilterExec filterExec) { } if (pushable.size() > 0) { // update the executable with pushable conditions QueryBuilder planQuery = TRANSLATOR_HANDLER.asQuery(Predicates.combineAnd(pushable)).asBuilder(); - QueryBuilder query = planQuery; - QueryBuilder filterQuery = queryExec.query(); - if (filterQuery != null) { - query = boolQuery().filter(filterQuery).filter(planQuery); - } + var query = Queries.combine(Clause.FILTER, asList(queryExec.query(), planQuery)); queryExec = new EsQueryExec( queryExec.source(), queryExec.index(), @@ -216,7 +213,7 @@ protected PhysicalPlan rule(FilterExec filterExec) { return plan; } - private static boolean canPushToSource(Expression exp) { + public static boolean canPushToSource(Expression exp) { if (exp instanceof BinaryComparison bc) { return isAttributePushable(bc.left(), bc) && bc.right().foldable(); } else if (exp instanceof BinaryLogic bl) { @@ -232,7 +229,7 @@ private static boolean canPushToSource(Expression exp) { } private static boolean isAttributePushable(Expression expression, ScalarFunction operation) { - if (expression instanceof FieldAttribute) { + if (expression instanceof FieldAttribute f && f.getExactInfo().hasExact()) { return true; } if (expression instanceof MetadataAttribute ma && ma.searchable()) { @@ -282,8 +279,8 @@ protected PhysicalPlan rule(TopNExec topNExec) { } private boolean canPushDownOrders(List orders) { - // allow only FieldAttributes (no expressions) for sorting - return false == Expressions.match(orders, s -> ((Order) s).child() instanceof FieldAttribute == false); + // allow only exact FieldAttributes (no expressions) for sorting + return orders.stream().allMatch(o -> o.child() instanceof FieldAttribute fa && fa.getExactInfo().hasExact()); } private List buildFieldSorts(List orders) { diff --git 
a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java
index bc78c3f5f8636..4f6f523c53bbb 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizer.java
@@ -10,6 +10,7 @@
import org.apache.lucene.util.BytesRef;
import org.elasticsearch.compute.data.BlockUtils;
import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
+import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals;
import org.elasticsearch.xpack.esql.expression.SurrogateExpression;
import org.elasticsearch.xpack.esql.expression.function.aggregate.Count;
import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.In;
@@ -28,6 +29,7 @@
import org.elasticsearch.xpack.ql.expression.Expression;
import org.elasticsearch.xpack.ql.expression.ExpressionSet;
import org.elasticsearch.xpack.ql.expression.Expressions;
+import org.elasticsearch.xpack.ql.expression.FieldAttribute;
import org.elasticsearch.xpack.ql.expression.Literal;
import org.elasticsearch.xpack.ql.expression.NamedExpression;
import org.elasticsearch.xpack.ql.expression.Order;
@@ -35,6 +37,7 @@
import org.elasticsearch.xpack.ql.expression.function.aggregate.AggregateFunction;
import org.elasticsearch.xpack.ql.expression.predicate.Predicates;
import org.elasticsearch.xpack.ql.expression.predicate.logical.Or;
+import org.elasticsearch.xpack.ql.expression.predicate.regex.RegexMatch;
import org.elasticsearch.xpack.ql.optimizer.OptimizerRules;
import org.elasticsearch.xpack.ql.optimizer.OptimizerRules.BinaryComparisonSimplification;
import org.elasticsearch.xpack.ql.optimizer.OptimizerRules.BooleanFunctionEqualsElimination;
@@ -61,6 +64,7 @@
import java.util.HashMap;
import java.util.List;
import java.util.Map;
+import java.util.Set;
import java.util.function.Predicate;

import static java.util.Arrays.asList;
@@ -69,7 +73,6 @@
import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.FoldNull;
import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.PropagateEquals;
import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.PropagateNullable;
-import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.ReplaceRegexMatch;
import static org.elasticsearch.xpack.ql.optimizer.OptimizerRules.TransformDirection;

public class LogicalPlanOptimizer extends RuleExecutor<LogicalPlan> {
@@ -84,7 +87,13 @@ protected List<Batch> batches() {
    }

    protected static List<Batch> rules() {
-        var substitutions = new Batch<>("Substitutions", Limiter.ONCE, new SubstituteSurrogates(), new ReplaceRegexMatch());
+        var substitutions = new Batch<>(
+            "Substitutions",
+            Limiter.ONCE,
+            new SubstituteSurrogates(),
+            new ReplaceRegexMatch(),
+            new ReplaceFieldAttributesWithExactSubfield()
+        );

        var operators = new Batch<>(
            "Operator Optimization",
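For orientation (not part of the diff), the disjunction-combining rule documented in the next hunk boils down to grouping OR-ed equality values by their key expression; a sketch with hypothetical stand-in types:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

final class CombineDisjunctionsSketch {
    record Eq(String key, Object value) {}

    // a == 1 OR a == 2 OR b == 3  ->  {a=[1, 2], b=[3]}; a single-element group is
    // emitted back as an equality (cf. createEquals below), larger groups as IN lists.
    static Map<String, List<Object>> groupByKey(List<Eq> disjuncts) {
        Map<String, List<Object>> groups = new LinkedHashMap<>();
        for (Eq eq : disjuncts) {
            groups.computeIfAbsent(eq.key(), k -> new ArrayList<>()).add(eq.value());
        }
        return groups;
    }
}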
a IN (1) OR a IN (2) becomes a IN (1, 2) + * + * This rule does NOT check for type compatibility as that phase has been + * already be verified in the analyzer. + */ + public static class CombineDisjunctionsToIn extends OptimizerRules.CombineDisjunctionsToIn { + protected In createIn(Expression key, List values, ZoneId zoneId) { return new In(key.source(), key, values); } + + protected Equals createEquals(Expression k, Set v, ZoneId finalZoneId) { + return new Equals(k.source(), k, v.iterator().next(), finalZoneId); + } } static class ReplaceLimitAndSortAsTopN extends OptimizerRules.OptimizerRule { @@ -804,4 +828,29 @@ protected LogicalPlan rule(Limit plan) { return p; } } + + public static class ReplaceRegexMatch extends OptimizerRules.ReplaceRegexMatch { + + protected Expression regexToEquals(RegexMatch regexMatch, Literal literal) { + return new Equals(regexMatch.source(), regexMatch.field(), literal); + } + } + + private static class ReplaceFieldAttributesWithExactSubfield extends OptimizerRules.OptimizerRule { + + @Override + protected LogicalPlan rule(LogicalPlan plan) { + if (plan instanceof Filter || plan instanceof OrderBy || plan instanceof Aggregate) { + return plan.transformExpressionsOnly(FieldAttribute.class, ReplaceFieldAttributesWithExactSubfield::toExact); + } + return plan; + } + + private static FieldAttribute toExact(FieldAttribute fa) { + if (fa.getExactInfo().hasExact() && fa.exactAttribute() != fa) { + return fa.exactAttribute(); + } + return fa; + } + } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java index c1c86c007032e..b057dd8023031 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java @@ -107,7 +107,8 @@ public PhysicalPlan apply(PhysicalPlan plan) { aliases.put(attr, as.child()); attributes.remove(attr); } else { - if (aliases.containsKey(attr) == false) { + // skip synthetically added attributes (the ones from AVG), see LogicalPlanOptimizer.SubstituteSurrogates + if (attr.synthetic() == false && aliases.containsKey(attr) == false) { attributes.add(attr); } } diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java index 0f002ab8ef70f..2f527c54430a7 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java @@ -17,8 +17,8 @@ import org.antlr.v4.runtime.TokenFactory; import org.antlr.v4.runtime.TokenSource; import org.antlr.v4.runtime.atn.PredictionMode; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; +import org.elasticsearch.logging.LogManager; +import org.elasticsearch.logging.Logger; import org.elasticsearch.xpack.ql.parser.CaseChangingCharStream; import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan; @@ -135,7 +135,7 @@ public Token nextToken() { Token token = delegate.nextToken(); if (token.getType() == EsqlBaseLexer.PARAM) { if (param >= params.size()) { - throw new ParsingException("Not enough actual parameters {} ", params.size()); + throw new ParsingException("Not enough actual parameters {}", params.size()); } paramTokens.put(token, 
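(Aside: the fold performed by CombineDisjunctionsToIn, restated over plain collections, since building QL Expression trees needs a lot of scaffolding. A simplified sketch, not the rule's actual code.)

    import java.util.LinkedHashMap;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    // a == 1 OR a == 2 OR a IN (3)  ->  {a: [1, 2, 3]}  ->  a IN (1, 2, 3);
    // a field left with a single value is rebuilt via createEquals() instead.
    static Map<String, Set<Object>> bucketByField(List<Map.Entry<String, Object>> equalities) {
        Map<String, Set<Object>> found = new LinkedHashMap<>();
        for (Map.Entry<String, Object> eq : equalities) {
            found.computeIfAbsent(eq.getKey(), k -> new LinkedHashSet<>()).add(eq.getValue());
        }
        return found;
    }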
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java
index c1c86c007032e..b057dd8023031 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizer.java
@@ -107,7 +107,8 @@ public PhysicalPlan apply(PhysicalPlan plan) {
                     aliases.put(attr, as.child());
                     attributes.remove(attr);
                 } else {
-                    if (aliases.containsKey(attr) == false) {
+                    // skip synthetically added attributes (the ones from AVG), see LogicalPlanOptimizer.SubstituteSurrogates
+                    if (attr.synthetic() == false && aliases.containsKey(attr) == false) {
                         attributes.add(attr);
                     }
                 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java
index 0f002ab8ef70f..2f527c54430a7 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/EsqlParser.java
@@ -17,8 +17,8 @@
 import org.antlr.v4.runtime.TokenFactory;
 import org.antlr.v4.runtime.TokenSource;
 import org.antlr.v4.runtime.atn.PredictionMode;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
+import org.elasticsearch.logging.LogManager;
+import org.elasticsearch.logging.Logger;
 import org.elasticsearch.xpack.ql.parser.CaseChangingCharStream;
 import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan;
@@ -135,7 +135,7 @@ public Token nextToken() {
             Token token = delegate.nextToken();
             if (token.getType() == EsqlBaseLexer.PARAM) {
                 if (param >= params.size()) {
-                    throw new ParsingException("Not enough actual parameters {} ", params.size());
+                    throw new ParsingException("Not enough actual parameters {}", params.size());
                 }
                 paramTokens.put(token, params.get(param));
                 param++;
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java
index 4ebc5b5ee4fad..bbffa6c63e9b1 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/parser/ExpressionBuilder.java
@@ -12,6 +12,14 @@
 import org.antlr.v4.runtime.tree.ParseTree;
 import org.antlr.v4.runtime.tree.TerminalNode;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals;
+import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan;
+import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual;
+import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan;
+import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual;
+import org.elasticsearch.xpack.esql.evaluator.predicate.operator.regex.RLike;
+import org.elasticsearch.xpack.esql.evaluator.predicate.operator.regex.WildcardLike;
+import org.elasticsearch.xpack.esql.expression.Order;
 import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add;
 import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div;
 import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mod;
@@ -25,7 +33,6 @@
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.expression.Literal;
 import org.elasticsearch.xpack.ql.expression.NamedExpression;
-import org.elasticsearch.xpack.ql.expression.Order;
 import org.elasticsearch.xpack.ql.expression.UnresolvedAttribute;
 import org.elasticsearch.xpack.ql.expression.UnresolvedStar;
 import org.elasticsearch.xpack.ql.expression.function.FunctionResolutionStrategy;
@@ -35,15 +42,8 @@
 import org.elasticsearch.xpack.ql.expression.predicate.logical.Or;
 import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNotNull;
 import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNull;
-import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.Equals;
-import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThan;
-import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThanOrEqual;
-import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThan;
-import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThanOrEqual;
-import org.elasticsearch.xpack.ql.expression.predicate.regex.RLike;
 import org.elasticsearch.xpack.ql.expression.predicate.regex.RLikePattern;
 import org.elasticsearch.xpack.ql.expression.predicate.regex.RegexMatch;
-import org.elasticsearch.xpack.ql.expression.predicate.regex.WildcardLike;
 import org.elasticsearch.xpack.ql.expression.predicate.regex.WildcardPattern;
 import org.elasticsearch.xpack.ql.tree.Source;
 import org.elasticsearch.xpack.ql.type.DataType;
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java
index 5106e0e27e52b..a98017ef398dc 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/logical/Grok.java
@@ -7,11 +7,11 @@
 package org.elasticsearch.xpack.esql.plan.logical;

-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
 import org.elasticsearch.grok.GrokBuiltinPatterns;
 import org.elasticsearch.grok.GrokCaptureConfig;
 import org.elasticsearch.grok.GrokCaptureType;
+import org.elasticsearch.logging.LogManager;
+import org.elasticsearch.logging.Logger;
 import org.elasticsearch.xpack.esql.expression.NamedExpressions;
 import org.elasticsearch.xpack.esql.parser.ParsingException;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EstimatesRowSize.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EstimatesRowSize.java
index 5da84cbab4091..643d99696c80a 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EstimatesRowSize.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/EstimatesRowSize.java
@@ -9,7 +9,7 @@
 import org.elasticsearch.compute.data.DocVector;
 import org.elasticsearch.compute.data.ElementType;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
+import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.type.DataType;
@@ -112,12 +112,12 @@ static int estimateSize(DataType dataType) {
                 }
                 yield 50; // wild estimate for the size of a string.
             }
-            case DOC -> throw new EsqlUnsupportedOperationException("can't load a [doc] with field extraction");
+            case DOC -> throw new EsqlIllegalArgumentException("can't load a [doc] with field extraction");
             case DOUBLE -> Double.BYTES;
             case INT -> Integer.BYTES;
             case LONG -> Long.BYTES;
             case NULL -> 0;
-            case UNKNOWN -> throw new EsqlUnsupportedOperationException("[unknown] can't be the result of field extraction");
+            case UNKNOWN -> throw new EsqlIllegalArgumentException("[unknown] can't be the result of field extraction");
         };
     }
 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LeafExec.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LeafExec.java
index 3f15d81160a57..ecf3aed27d70e 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LeafExec.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plan/physical/LeafExec.java
@@ -7,7 +7,6 @@
 package org.elasticsearch.xpack.esql.plan.physical;

-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
 import org.elasticsearch.xpack.ql.tree.Source;

 import java.util.Collections;
@@ -21,6 +20,6 @@ protected LeafExec(Source source) {
     @Override
     public final LeafExec replaceChildren(List newChildren) {
-        throw new EsqlUnsupportedOperationException("this type of node doesn't have any children to replace");
+        throw new UnsupportedOperationException("this type of node doesn't have any children to replace");
     }
 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java
index ae443e552d725..0e984b3b85b0b 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AbstractPhysicalOperationProviders.java
@@ -18,7 +18,6 @@
 import org.elasticsearch.compute.operator.HashAggregationOperator.HashAggregationOperatorFactory;
 import org.elasticsearch.compute.operator.Operator;
 import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
 import org.elasticsearch.xpack.esql.plan.physical.AggregateExec;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.LocalExecutionPlannerContext;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner.PhysicalOperation;
@@ -59,9 +58,9 @@ public final PhysicalOperation groupingPhysicalOperation(
             // append channels to the layout
             if (mode == AggregateExec.Mode.FINAL) {
-                layout.appendChannels(aggregates);
+                layout.append(aggregates);
             } else {
-                layout.appendChannels(aggregateMapper.mapNonGrouping(aggregates));
+                layout.append(aggregateMapper.mapNonGrouping(aggregates));
             }
             // create the agg factories
             aggregatesToFactory(
@@ -88,8 +87,8 @@
                 if (groupAttribute == null) {
                     throw new EsqlIllegalArgumentException("Unexpected non-named expression[{}] as grouping in [{}]", group, aggregateExec);
                 }
-                Set grpAttribIds = new HashSet<>();
-                grpAttribIds.add(groupAttribute.id());
+                Layout.ChannelSet groupAttributeLayout = new Layout.ChannelSet(new HashSet<>(), groupAttribute.dataType());
+                groupAttributeLayout.nameIds().add(groupAttribute.id());

                 /*
                  * Check for aliasing in aggregates which occurs in two cases (due to combining project + stats):
@@ -100,10 +99,9 @@
                     if (agg instanceof Alias a) {
                         if (a.child() instanceof Attribute attr) {
                             if (groupAttribute.id().equals(attr.id())) {
-                                grpAttribIds.add(a.id());
+                                groupAttributeLayout.nameIds().add(a.id());
                                 // TODO: investigate whether a break could be used since it shouldn't be possible to have multiple
-                                // attributes
-                                // pointing to the same attribute
+                                // attributes pointing to the same attribute
                             }
                             // partial mode only
                             // check if there's any alias used in grouping - no need for the final reduction since the intermediate data
@@ -118,18 +116,19 @@ else if (mode == AggregateExec.Mode.PARTIAL) {
                         }
                     }
                 }
-                layout.appendChannel(grpAttribIds);
-                groupSpecs.add(new GroupSpec(source.layout.getChannel(groupAttribute.id()), groupAttribute));
+                layout.append(groupAttributeLayout);
+                Layout.ChannelAndType groupInput = source.layout.get(groupAttribute.id());
+                groupSpecs.add(new GroupSpec(groupInput == null ? null : groupInput.channel(), groupAttribute));
             }

             if (mode == AggregateExec.Mode.FINAL) {
                 for (var agg : aggregates) {
                     if (agg instanceof Alias alias && alias.child() instanceof AggregateFunction) {
-                        layout.appendChannel(alias.id());
+                        layout.append(alias);
                     }
                 }
             } else {
-                layout.appendChannels(aggregateMapper.mapGrouping(aggregates));
+                layout.append(aggregateMapper.mapGrouping(aggregates));
             }

             // create the agg factories
@@ -163,7 +162,7 @@ else if (mode == AggregateExec.Mode.PARTIAL) {
         if (operatorFactory != null) {
             return source.with(operatorFactory, layout.build());
         }
-        throw new EsqlUnsupportedOperationException("no operator factory");
+        throw new EsqlIllegalArgumentException("no operator factory");
     }

     /***
@@ -253,7 +252,7 @@ private void aggregatesToFactory(
                 params[i] = aggParams.get(i).fold();
             }

-            List inputChannels = sourceAttr.stream().map(NamedExpression::id).map(layout::getChannel).toList();
+            List inputChannels = sourceAttr.stream().map(attr -> layout.get(attr.id()).channel()).toList();
             assert inputChannels != null && inputChannels.size() > 0 && inputChannels.stream().allMatch(i -> i >= 0);
             if (aggregateFunction instanceof ToAggregator agg) {
                 consumer.accept(new AggFunctionSupplierContext(agg.supplier(bigArrays, inputChannels), aggMode));
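(Aside: how the new Layout.ChannelSet expresses "one channel, several ids" for a grouping attribute and an alias over it; `gender` and `g` are hypothetical NameIds resolved earlier in the planner.)

    import java.util.HashSet;
    import org.elasticsearch.xpack.ql.type.DataTypes;

    Layout.Builder layout = new Layout.Builder();
    Layout.ChannelSet set = new Layout.ChannelSet(new HashSet<>(), DataTypes.KEYWORD);
    set.nameIds().add(gender); // the grouping attribute itself
    set.nameIds().add(g);      // an alias resolving to the same group
    layout.append(set);        // one channel, two ids
    Layout built = layout.build();
    // built.get(gender).channel() == built.get(g).channel()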
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java
index 3c948ec8517b4..81fe43ebade7f 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/AggregateMapper.java
@@ -11,7 +11,6 @@
 import org.elasticsearch.compute.data.ElementType;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
 import org.elasticsearch.xpack.esql.expression.SurrogateExpression;
 import org.elasticsearch.xpack.esql.expression.function.aggregate.Count;
 import org.elasticsearch.xpack.esql.expression.function.aggregate.CountDistinct;
@@ -112,7 +111,7 @@ private List computeEntryForAgg(Expression aggregate,
             // This condition is a little pedantic, but do we expect other expressions here? If so, then add them
             return List.of();
         } else {
-            throw new EsqlUnsupportedOperationException("unknown: " + aggregate.getClass() + ": " + aggregate);
+            throw new EsqlIllegalArgumentException("unknown agg: " + aggregate.getClass() + ": " + aggregate);
         }
     }
@@ -202,7 +201,7 @@ static DataType toDataType(ElementType elementType) {
             case INT -> DataTypes.INTEGER;
             case LONG -> DataTypes.LONG;
             case DOUBLE -> DataTypes.DOUBLE;
-            default -> throw new EsqlUnsupportedOperationException("unsupported agg type: " + elementType);
+            default -> throw new EsqlIllegalArgumentException("unsupported agg type: " + elementType);
         };
     }
@@ -222,7 +221,7 @@ static String dataTypeToString(DataType type, Class aggClass) {
         } else if (type.equals(DataTypes.KEYWORD) || type.equals(DataTypes.IP)) {
             return "BytesRef";
         } else {
-            throw new EsqlUnsupportedOperationException("unsupported agg type: " + type);
+            throw new EsqlIllegalArgumentException("illegal agg type: " + type.typeName());
         }
     }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/DefaultLayout.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/DefaultLayout.java
new file mode 100644
index 0000000000000..e3a520149108a
--- /dev/null
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/DefaultLayout.java
@@ -0,0 +1,62 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.planner;
+
+import org.elasticsearch.xpack.ql.expression.NameId;
+
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
+class DefaultLayout implements Layout {
+    private final Map layout;
+    private final int numberOfChannels;
+
+    DefaultLayout(Map layout, int numberOfChannels) {
+        this.layout = layout;
+        this.numberOfChannels = numberOfChannels;
+    }
+
+    @Override
+    public ChannelAndType get(NameId id) {
+        return layout.get(id);
+    }
+
+    /**
+     * @return the total number of channels in the layout.
+     */
+    @Override
+    public int numberOfChannels() {
+        return numberOfChannels;
+    }
+
+    @Override
+    public Map> inverse() {
+        Map> inverse = new HashMap<>();
+        for (Map.Entry entry : layout.entrySet()) {
+            NameId key = entry.getKey();
+            Integer value = entry.getValue().channel();
+            inverse.computeIfAbsent(value, k -> new HashSet<>()).add(key);
+        }
+        return inverse;
+    }
+
+    /**
+     * @return creates a builder to append to this layout.
+     */
+    @Override
+    public Layout.Builder builder() {
+        return new Builder(numberOfChannels, layout);
+    }
+
+    @Override
+    public String toString() {
+        return "Layout{" + "layout=" + layout + ", numberOfChannels=" + numberOfChannels + '}';
+    }
+}
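(Aside, not part of the patch: a small sketch of the inverse() view that ExchangeLayout below builds on. NameId's no-arg constructor and the DataTypes constants are assumed from the QL package; the values are hypothetical.)

    import java.util.Map;
    import java.util.Set;
    import org.elasticsearch.xpack.ql.expression.NameId;
    import org.elasticsearch.xpack.ql.type.DataTypes;

    NameId a = new NameId(), b = new NameId(), c = new NameId();
    Layout layout = new DefaultLayout(
        Map.of(
            a, new Layout.ChannelAndType(0, DataTypes.LONG),   // a and b share channel 0...
            b, new Layout.ChannelAndType(0, DataTypes.LONG),
            c, new Layout.ChannelAndType(1, DataTypes.KEYWORD) // ...c sits alone on channel 1
        ),
        2
    );
    Map<Integer, Set<NameId>> inv = layout.inverse(); // {0 -> [a, b], 1 -> [c]}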
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java
index 3a72bff0d0c82..bae1980a3e856 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/EsPhysicalOperationProviders.java
@@ -13,11 +13,10 @@
 import org.elasticsearch.compute.aggregation.GroupingAggregator;
 import org.elasticsearch.compute.data.ElementType;
 import org.elasticsearch.compute.lucene.LuceneOperator;
-import org.elasticsearch.compute.lucene.LuceneSourceOperator.LuceneSourceOperatorFactory;
-import org.elasticsearch.compute.lucene.LuceneTopNSourceOperator.LuceneTopNSourceOperatorFactory;
+import org.elasticsearch.compute.lucene.LuceneSourceOperator;
+import org.elasticsearch.compute.lucene.LuceneTopNSourceOperator;
 import org.elasticsearch.compute.lucene.ValueSources;
 import org.elasticsearch.compute.lucene.ValuesSourceReaderOperator;
-import org.elasticsearch.compute.operator.EmptySourceOperator;
 import org.elasticsearch.compute.operator.Operator;
 import org.elasticsearch.compute.operator.OrdinalsGroupingOperator;
 import org.elasticsearch.index.mapper.NestedLookup;
@@ -62,7 +61,7 @@ public final PhysicalOperation fieldExtractPhysicalOperation(FieldExtractExec fi
         PhysicalOperation op = source;
         for (Attribute attr : fieldExtractExec.attributesToExtract()) {
-            layout.appendChannel(attr.id());
+            layout.append(attr);
             Layout previousLayout = op.layout;

             var sources = ValueSources.sources(
@@ -72,7 +71,7 @@ public final PhysicalOperation fieldExtractPhysicalOperation(FieldExtractExec fi
                 LocalExecutionPlanner.toElementType(attr.dataType())
             );

-            int docChannel = previousLayout.getChannel(sourceAttr.id());
+            int docChannel = previousLayout.get(sourceAttr.id()).channel();

             op = op.with(
                 new ValuesSourceReaderOperator.ValuesSourceReaderOperatorFactory(sources, docChannel, attr.name()),
@@ -84,7 +83,7 @@ public final PhysicalOperation fieldExtractPhysicalOperation(FieldExtractExec fi

     @Override
     public final PhysicalOperation sourcePhysicalOperation(EsQueryExec esQueryExec, LocalExecutionPlannerContext context) {
-        LuceneOperator.LuceneOperatorFactory operatorFactory = null;
+        final LuceneOperator.Factory luceneFactory;
         Function querySupplier = searchContext -> {
             SearchExecutionContext ctx = searchContext.getSearchExecutionContext();
             Query query = ctx.toQuery(esQueryExec.query()).query();
@@ -118,7 +117,7 @@ public final PhysicalOperation sourcePhysicalOperation(EsQueryExec esQueryExec,
             for (FieldSort sort : sorts) {
                 fieldSorts.add(sort.fieldSortBuilder());
             }
-            operatorFactory = new LuceneTopNSourceOperatorFactory(
+            luceneFactory = new LuceneTopNSourceOperator.Factory(
                 searchContexts,
                 querySupplier,
                 context.dataPartitioning(),
@@ -128,7 +127,7 @@ public final PhysicalOperation sourcePhysicalOperation(EsQueryExec esQueryExec,
                 fieldSorts
             );
         } else {
-            operatorFactory = new LuceneSourceOperatorFactory(
+            luceneFactory = new LuceneSourceOperator.Factory(
                 searchContexts,
                 querySupplier,
                 context.dataPartitioning(),
@@ -138,15 +137,10 @@ public final PhysicalOperation sourcePhysicalOperation(EsQueryExec esQueryExec,
             );
         }
         Layout.Builder layout = new Layout.Builder();
-        for (int i = 0; i < esQueryExec.output().size(); i++) {
-            layout.appendChannel(esQueryExec.output().get(i).id());
-        }
-        if (operatorFactory.size() > 0) {
-            context.driverParallelism(new DriverParallelism(DriverParallelism.Type.DATA_PARALLELISM, operatorFactory.size()));
-            return PhysicalOperation.fromSource(operatorFactory, layout.build());
-        } else {
-            return PhysicalOperation.fromSource(new EmptySourceOperator.Factory(), layout.build());
-        }
+        layout.append(esQueryExec.output());
+        int instanceCount = Math.max(1, luceneFactory.taskConcurrency());
+        context.driverParallelism(new DriverParallelism(DriverParallelism.Type.DATA_PARALLELISM, instanceCount));
+        return PhysicalOperation.fromSource(luceneFactory, layout.build());
     }

     @Override
@@ -159,7 +153,7 @@ public final Operator.OperatorFactory ordinalGroupingOperatorFactory(
         LocalExecutionPlannerContext context
     ) {
         var sourceAttribute = FieldExtractExec.extractSourceAttributesFrom(aggregateExec.child());
-        int docChannel = source.layout.getChannel(sourceAttribute.id());
+        int docChannel = source.layout.get(sourceAttribute.id()).channel();
         // The grouping-by values are ready, let's group on them directly.
         // Costin: why are they ready and not already exposed in the layout?
         return new OrdinalsGroupingOperator.OrdinalsGroupingOperatorFactory(
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/ExchangeLayout.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/ExchangeLayout.java
index 327dc588e09b6..43bb36d070b79 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/ExchangeLayout.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/ExchangeLayout.java
@@ -7,43 +7,32 @@
 package org.elasticsearch.xpack.esql.planner;

-import org.elasticsearch.common.util.Maps;
 import org.elasticsearch.xpack.ql.expression.NameId;

-import java.util.HashSet;
+import java.util.HashMap;
 import java.util.Map;
 import java.util.Set;

-import static java.util.Collections.emptyMap;
-
 /**
  * Decorating layout that creates the NameId -> Value lazily based on the calls made to its content.
  * Essentially it maps the existing (old) NameIds to the new ones.
  */
-class ExchangeLayout extends Layout {
-
-    private final Map delegate;
+class ExchangeLayout implements Layout {
+    private final Layout delegate;
     private final Map> inverse;
     private final Map mappingToOldLayout;
     private int counter;

-    ExchangeLayout(Layout layout) {
-        super(emptyMap(), 0);
-        this.delegate = layout.internalLayout();
-        this.mappingToOldLayout = Maps.newMapWithExpectedSize(delegate.size());
-        this.inverse = Maps.newMapWithExpectedSize(delegate.size());
-
-        for (Map.Entry entry : delegate.entrySet()) {
-            NameId key = entry.getKey();
-            Integer value = entry.getValue();
-            inverse.computeIfAbsent(value, k -> new HashSet<>()).add(key);
-        }
+    ExchangeLayout(Layout delegate) {
+        this.delegate = delegate;
+        this.inverse = delegate.inverse();
+        this.mappingToOldLayout = new HashMap<>(inverse.size());
     }

     @Override
-    public Integer getChannel(NameId id) {
+    public ChannelAndType get(NameId id) {
         var oldId = mappingToOldLayout.get(id);
-        if (oldId == null && counter < delegate.size()) {
+        if (oldId == null && counter < inverse.size()) {
             var names = inverse.get(counter++);
             for (var name : names) {
                 oldId = name;
@@ -54,12 +43,22 @@ public Integer getChannel(NameId id) {
     }

     @Override
-    public int numberOfIds() {
-        return delegate.size();
+    public int numberOfChannels() {
+        return delegate.numberOfChannels();
     }

     @Override
-    public int numberOfChannels() {
-        return inverse.size();
+    public String toString() {
+        return "ExchangeLayout{" + delegate + '}';
+    }
+
+    @Override
+    public Builder builder() {
+        throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public Map> inverse() {
+        throw new UnsupportedOperationException();
     }
 }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Layout.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Layout.java
index af7c94f45310f..a97a467aa3c0a 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Layout.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Layout.java
@@ -9,6 +9,7 @@
 import org.elasticsearch.xpack.ql.expression.NameId;
 import org.elasticsearch.xpack.ql.expression.NamedExpression;
+import org.elasticsearch.xpack.ql.type.DataType;

 import java.util.ArrayList;
 import java.util.Collection;
@@ -18,119 +19,109 @@
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;

 /**
  * Maintains the mapping from attribute ids to channels (block index).
  *
  * An attribute can only be mapped to exactly one channel but one channel can be mapped to multiple attributes.
  */
-public class Layout {
-
-    private final Map layout;
-    private final int numberOfChannels;
-
-    Layout(Map layout, int numberOfChannels) {
-        this.layout = layout;
-        this.numberOfChannels = numberOfChannels;
-    }
+public interface Layout {
+    /**
+     * The values stored in the {@link Layout}, a channel id and a {@link DataType}.
+     */
+    record ChannelAndType(int channel, DataType type) {}

     /**
-     * @param id the attribute id
-     * @return the channel to which the specific attribute id is mapped or `null` if the attribute id does not exist in the layout.
+     * A part of an "inverse" layout, a {@link Set} of {@link NameId}s and a {@link DataType}.
      */
-    public Integer getChannel(NameId id) {
-        return layout.get(id);
-    }
+    record ChannelSet(Set nameIds, DataType type) {}

     /**
-     * @return the total number of ids in the layout.
+     * @param id the attribute id
+     * @return the channel to which the specific attribute id is mapped or `null` if the attribute id does not exist in the layout.
      */
-    public int numberOfIds() {
-        return layout.size();
-    }
+    ChannelAndType get(NameId id);

     /**
      * @return the total number of channels in the layout.
      */
-    public int numberOfChannels() {
-        return numberOfChannels;
-    }
-
-    Map internalLayout() {
-        return layout;
-    }
+    int numberOfChannels();

     /**
      * @return creates a builder to append to this layout.
      */
-    public Layout.Builder builder() {
-        return new Layout.Builder(this);
-    }
+    Layout.Builder builder();

-    @Override
-    public String toString() {
-        return "BlockLayout{" + "layout=" + layout + ", numberOfChannels=" + numberOfChannels + '}';
-    }
+    Map> inverse();

     /**
      * Builder class for Layout. The builder ensures that layouts cannot be altered after creation (through references to the underlying
      * map).
      */
-    public static class Builder {
+    class Builder {
+        private final List channels = new ArrayList<>();

-        private final List> channels;
+        public Builder() {}

-        public Builder() {
-            this.channels = new ArrayList<>();
-        }
-
-        private Builder(Layout layout) {
-            channels = IntStream.range(0, layout.numberOfChannels).>mapToObj(i -> new HashSet<>()).collect(Collectors.toList());
-            for (Map.Entry entry : layout.layout.entrySet()) {
-                channels.get(entry.getValue()).add(entry.getKey());
+        Builder(int numberOfChannels, Map layout) {
+            for (int i = 0; i < numberOfChannels; i++) {
+                channels.add(null);
+            }
+            for (Map.Entry entry : layout.entrySet()) {
+                ChannelSet set = channels.get(entry.getValue().channel);
+                if (set == null) {
+                    set = new ChannelSet(new HashSet<>(), entry.getValue().type());
+                    channels.set(entry.getValue().channel, set);
+                } else {
+                    if (set.type != entry.getValue().type()) {
+                        throw new IllegalArgumentException();
+                    }
+                }
+                set.nameIds.add(entry.getKey());
             }
         }

         /**
-         * Appends a new channel to the layout. The channel is mapped to a single attribute id.
-         * @param id the attribute id
+         * Appends a new channel to the layout. The channel is mapped to one or more attribute ids.
          */
-        public Builder appendChannel(NameId id) {
-            channels.add(Set.of(id));
+        public Builder append(ChannelSet set) {
+            if (set.nameIds.size() < 1) {
+                throw new IllegalArgumentException("Channel must be mapped to at least one id.");
+            }
+            channels.add(set);
             return this;
         }

         /**
-         * Appends a new channel to the layout. The channel is mapped to one or more attribute ids.
-         * @param ids the attribute ids
+         * Appends a new channel to the layout. The channel is mapped to a single attribute id.
          */
-        public Builder appendChannel(Set ids) {
-            if (ids.size() < 1) {
-                throw new IllegalArgumentException("Channel must be mapped to at least one id.");
-            }
-            channels.add(ids);
-            return this;
+        public Builder append(NamedExpression attribute) {
+            return append(new ChannelSet(Set.of(attribute.id()), attribute.dataType()));
         }

-        public Builder appendChannels(Collection attributes) {
-            for (var attribute : attributes) {
-                appendChannel(attribute.id());
+        /**
+         * Appends many new channels to the layout. Each channel is mapped to a single attribute id.
+         */
+        public Builder append(Collection attributes) {
+            for (NamedExpression attribute : attributes) {
+                append(new ChannelSet(Set.of(attribute.id()), attribute.dataType()));
             }
             return this;
         }

+        /**
+         * Build a new {@link Layout}.
+         */
         public Layout build() {
-            Map layout = new HashMap<>();
+            Map layout = new HashMap<>();
             int numberOfChannels = 0;
-            for (Set ids : this.channels) {
+            for (ChannelSet set : channels) {
                 int channel = numberOfChannels++;
-                for (NameId id : ids) {
-                    layout.putIfAbsent(id, channel);
+                for (NameId id : set.nameIds) {
+                    layout.putIfAbsent(id, new ChannelAndType(channel, set.type));
                 }
             }
-            return new Layout(Collections.unmodifiableMap(layout), numberOfChannels);
+            return new DefaultLayout(Collections.unmodifiableMap(layout), numberOfChannels);
         }
     }
 }
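(Aside: a hypothetical round trip through the reworked builder; `docAttr`, `tsAttr` and `msgAttr` stand in for NamedExpressions produced by the planner.)

    Layout.Builder b = new Layout.Builder();
    b.append(docAttr);                         // channel 0
    b.append(tsAttr);                          // channel 1
    Layout layout = b.build();

    Layout extended = layout.builder()         // copies the existing channels...
        .append(msgAttr)                       // ...and appends channel 2
        .build();
    assert extended.get(docAttr.id()).channel() == 0;           // prior mappings are preserved
    assert extended.get(msgAttr.id()).type() == msgAttr.dataType();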
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java
index 06b5ef78dc64d..467e04deb579d 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlanner.java
@@ -44,7 +44,6 @@
 import org.elasticsearch.index.query.MatchAllQueryBuilder;
 import org.elasticsearch.tasks.CancellableTask;
 import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
 import org.elasticsearch.xpack.esql.enrich.EnrichLookupOperator;
 import org.elasticsearch.xpack.esql.enrich.EnrichLookupService;
 import org.elasticsearch.xpack.esql.evaluator.EvalMapper;
@@ -203,7 +202,7 @@ else if (node instanceof OutputExec outputExec) {
             return planExchangeSink(exchangeSink, context);
         }

-        throw new EsqlUnsupportedOperationException("unknown physical plan node [" + node.nodeName() + "]");
+        throw new EsqlIllegalArgumentException("unknown physical plan node [" + node.nodeName() + "]");
     }

     private PhysicalOperation planAggregation(AggregateExec aggregate, LocalExecutionPlannerContext context) {
@@ -257,7 +256,7 @@ public static ElementType toElementType(DataType dataType) {
         if (dataType == DataTypes.BOOLEAN) {
             return ElementType.BOOLEAN;
         }
-        throw EsqlUnsupportedOperationException.unsupportedDataType(dataType);
+        throw EsqlIllegalArgumentException.illegalDataType(dataType);
     }

     private PhysicalOperation planOutput(OutputExec outputExec, LocalExecutionPlannerContext context) {
@@ -282,7 +281,7 @@ private static Function alignPageToAttributes(List attrs,
         int index = -1;
         boolean transformRequired = false;
         for (var attribute : attrs) {
-            mappedPosition[++index] = layout.getChannel(attribute.id());
+            mappedPosition[++index] = layout.get(attribute.id()).channel();
             transformRequired |= mappedPosition[index] != index;
         }
         Function transformer = transformRequired ? p -> {
@@ -297,7 +296,7 @@ private static Function alignPageToAttributes(List attrs,
     }

     private PhysicalOperation planExchange(ExchangeExec exchangeExec, LocalExecutionPlannerContext context) {
-        throw new EsqlUnsupportedOperationException("Exchange needs to be replaced with a sink/source");
+        throw new UnsupportedOperationException("Exchange needs to be replaced with a sink/source");
     }

     private PhysicalOperation planExchangeSink(ExchangeSinkExec exchangeSink, LocalExecutionPlannerContext context) {
@@ -315,9 +314,7 @@ private PhysicalOperation planExchangeSource(ExchangeSourceExec exchangeSource,
         Objects.requireNonNull(exchangeSourceHandler, "ExchangeSourceHandler wasn't provided");

         var builder = new Layout.Builder();
-        for (var attr : exchangeSource.output()) {
-            builder.appendChannel(attr.id());
-        }
+        builder.append(exchangeSource.output());
         // decorate the layout
         var l = builder.build();
         var layout = exchangeSource.isIntermediateAgg() ? new ExchangeLayout(l) : l;
@@ -331,7 +328,7 @@ private PhysicalOperation planTopN(TopNExec topNExec, LocalExecutionPlannerConte
         List orders = topNExec.order().stream().map(order -> {
             int sortByChannel;
             if (order.child() instanceof Attribute a) {
-                sortByChannel = source.layout.getChannel(a.id());
+                sortByChannel = source.layout.get(a.id()).channel();
             } else {
                 throw new EsqlIllegalArgumentException("order by expression must be an attribute");
             }
@@ -365,7 +362,7 @@ private PhysicalOperation planTopN(TopNExec topNExec, LocalExecutionPlannerConte
         if (topNExec.limit() instanceof Literal literal) {
             limit = Integer.parseInt(literal.value().toString());
         } else {
-            throw new EsqlUnsupportedOperationException("limit only supported with literal values");
+            throw new EsqlIllegalArgumentException("limit only supported with literal values");
         }

         // TODO Replace page size with passing estimatedRowSize down
@@ -387,7 +384,7 @@ private PhysicalOperation planEval(EvalExec eval, LocalExecutionPlannerContext c
             Supplier evaluatorSupplier;
             evaluatorSupplier = EvalMapper.toEvaluator(field.child(), source.layout);
             Layout.Builder layout = source.layout.builder();
-            layout.appendChannel(field.toAttribute().id());
+            layout.append(field.toAttribute());
             source = source.with(new EvalOperatorFactory(evaluatorSupplier), layout.build());
         }
         return source;
@@ -396,9 +393,7 @@ private PhysicalOperation planEval(EvalExec eval, LocalExecutionPlannerContext c
     private PhysicalOperation planDissect(DissectExec dissect, LocalExecutionPlannerContext context) {
         PhysicalOperation source = plan(dissect.child(), context);
         Layout.Builder layoutBuilder = source.layout.builder();
-        for (Attribute attr : dissect.extractedFields()) {
-            layoutBuilder.appendChannel(attr.id());
-        }
+        layoutBuilder.append(dissect.extractedFields());
         final Expression expr = dissect.inputExpression();
         String[] attributeNames = Expressions.names(dissect.extractedFields()).toArray(new String[0]);
@@ -418,10 +413,7 @@ private PhysicalOperation planGrok(GrokExec grok, LocalExecutionPlannerContext c
         PhysicalOperation source = plan(grok.child(), context);
         Layout.Builder layoutBuilder = source.layout.builder();
         List extractedFields = grok.extractedFields();
-        for (Attribute attr : extractedFields) {
-            layoutBuilder.appendChannel(attr.id());
-        }
-
+        layoutBuilder.append(extractedFields);
         Map fieldToPos = new HashMap<>(extractedFields.size());
         Map fieldToType = new HashMap<>(extractedFields.size());
         ElementType[] types = new ElementType[extractedFields.size()];
@@ -448,10 +440,7 @@ private PhysicalOperation planGrok(GrokExec grok, LocalExecutionPlannerContext c
     private PhysicalOperation planEnrich(EnrichExec enrich, LocalExecutionPlannerContext context) {
         PhysicalOperation source = plan(enrich.child(), context);
         Layout.Builder layoutBuilder = source.layout.builder();
-        List extractedFields = enrich.enrichFields();
-        for (NamedExpression attr : extractedFields) {
-            layoutBuilder.appendChannel(attr.id());
-        }
+        layoutBuilder.append(enrich.enrichFields());
         Layout layout = layoutBuilder.build();
         Set indices = enrich.enrichIndex().concreteIndices();
         if (indices.size() != 1) {
@@ -463,7 +452,7 @@ private PhysicalOperation planEnrich(EnrichExec enrich, LocalExecutionPlannerCon
             sessionId,
             parentTask,
             1, // TODO: Add a concurrent setting for enrich - also support unordered mode
-            source.layout.getChannel(enrich.matchField().id()),
+            source.layout.get(enrich.matchField().id()).channel(),
             enrichLookupService,
             enrichIndex,
             "match", // TODO: enrich should also resolve the match_type
@@ -481,20 +470,13 @@ private Supplier toEvaluator(Expression exp, Layout layout)
     private PhysicalOperation planRow(RowExec row, LocalExecutionPlannerContext context) {
         List obj = row.fields().stream().map(f -> f.child().fold()).toList();
         Layout.Builder layout = new Layout.Builder();
-        var output = row.output();
-        for (Attribute attribute : output) {
-            layout.appendChannel(attribute.id());
-        }
+        layout.append(row.output());
         return PhysicalOperation.fromSource(new RowOperatorFactory(obj), layout.build());
     }

     private PhysicalOperation planLocal(LocalSourceExec localSourceExec, LocalExecutionPlannerContext context) {
         Layout.Builder layout = new Layout.Builder();
-        var output = localSourceExec.output();
-        for (Attribute attribute : output) {
-            layout.appendChannel(attribute.id());
-        }
+        layout.append(localSourceExec.output());
         LocalSourceOperator.BlockSupplier supplier = () -> localSourceExec.supplier().get();
         var operator = new LocalSourceOperator(supplier);
         return PhysicalOperation.fromSource(new LocalSourceFactory(() -> operator), layout.build());
@@ -502,16 +484,14 @@ private PhysicalOperation planLocal(LocalSourceExec localSourceExec, LocalExecut
     private PhysicalOperation planShow(ShowExec showExec) {
         Layout.Builder layout = new Layout.Builder();
-        for (var attribute : showExec.output()) {
-            layout.appendChannel(attribute.id());
-        }
+        layout.append(showExec.output());
         return PhysicalOperation.fromSource(new ShowOperator.ShowOperatorFactory(showExec.values()), layout.build());
     }

     private PhysicalOperation planProject(ProjectExec project, LocalExecutionPlannerContext context) {
         var source = plan(project.child(), context);

-        Map> inputChannelToOutputIds = new HashMap<>();
+        Map inputChannelToOutputIds = new HashMap<>();
         for (NamedExpression ne : project.projections()) {
             NameId inputId;
             if (ne instanceof Alias a) {
@@ -519,19 +499,26 @@ private PhysicalOperation planProject(ProjectExec project, LocalExecutionPlanner
             } else {
                 inputId = ne.id();
             }
-            int inputChannel = source.layout.getChannel(inputId);
-            inputChannelToOutputIds.computeIfAbsent(inputChannel, ignore -> new HashSet<>()).add(ne.id());
+            Layout.ChannelAndType input = source.layout.get(inputId);
+            Layout.ChannelSet channelSet = inputChannelToOutputIds.computeIfAbsent(
+                input.channel(),
+                ignore -> new Layout.ChannelSet(new HashSet<>(), input.type())
+            );
+            if (channelSet.type() != input.type()) {
+                throw new IllegalArgumentException("type mismatch for aliases");
+            }
+            channelSet.nameIds().add(ne.id());
         }

         BitSet mask = new BitSet();
         Layout.Builder layout = new Layout.Builder();
         for (int inChannel = 0; inChannel < source.layout.numberOfChannels(); inChannel++) {
-            Set outputIds = inputChannelToOutputIds.get(inChannel);
+            Layout.ChannelSet outputSet = inputChannelToOutputIds.get(inChannel);

-            if (outputIds != null) {
+            if (outputSet != null) {
                 mask.set(inChannel);
-                layout.appendChannel(outputIds);
+                layout.append(outputSet);
             }
         }
@@ -556,7 +543,7 @@ private PhysicalOperation planLimit(LimitExec limit, LocalExecutionPlannerContex
     private PhysicalOperation planMvExpand(MvExpandExec mvExpandExec, LocalExecutionPlannerContext context) {
         PhysicalOperation source = plan(mvExpandExec.child(), context);
-        return source.with(new MvExpandOperator.Factory(source.layout.getChannel(mvExpandExec.target().id())), source.layout);
+        return source.with(new MvExpandOperator.Factory(source.layout.get(mvExpandExec.target().id()).channel()), source.layout);
     }

     /**
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java
index 43d62acc2ed8b..eb50a1ceb4071 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/Mapper.java
@@ -7,7 +7,7 @@
 package org.elasticsearch.xpack.esql.planner;

-import org.elasticsearch.xpack.esql.EsqlUnsupportedOperationException;
+import org.elasticsearch.xpack.esql.EsqlIllegalArgumentException;
 import org.elasticsearch.xpack.esql.plan.logical.Dissect;
 import org.elasticsearch.xpack.esql.plan.logical.Enrich;
 import org.elasticsearch.xpack.esql.plan.logical.Eval;
@@ -107,7 +107,7 @@ public PhysicalPlan map(LogicalPlan p) {
             return plan;
         }

-        throw new EsqlUnsupportedOperationException("unsupported logical plan node [" + p.nodeName() + "]");
+        throw new EsqlIllegalArgumentException("unsupported logical plan node [" + p.nodeName() + "]");
     }

     private static boolean isPipelineBreaker(LogicalPlan p) {
@@ -173,7 +173,7 @@ private PhysicalPlan map(UnaryPlan p, PhysicalPlan child) {
             return map(aggregate, child);
         }

-        throw new EsqlUnsupportedOperationException("unsupported unary logical plan node [" + p.nodeName() + "]");
+        throw new EsqlIllegalArgumentException("unsupported unary logical plan node [" + p.nodeName() + "]");
     }

     private PhysicalPlan map(Aggregate aggregate, PhysicalPlan child) {
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java
index 77873e11382bc..75f704b9f14ea 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/planner/PlannerUtils.java
@@ -24,15 +24,25 @@
 import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan;
 import org.elasticsearch.xpack.esql.session.EsqlConfiguration;
 import org.elasticsearch.xpack.esql.stats.SearchStats;
+import org.elasticsearch.xpack.ql.expression.AttributeSet;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.expression.predicate.Predicates;
 import org.elasticsearch.xpack.ql.plan.logical.EsRelation;
+import org.elasticsearch.xpack.ql.plan.logical.Filter;
 import org.elasticsearch.xpack.ql.tree.Source;
 import org.elasticsearch.xpack.ql.util.Holder;
+import org.elasticsearch.xpack.ql.util.Queries;

-import java.util.Arrays;
+import java.util.ArrayList;
 import java.util.LinkedHashSet;
 import java.util.List;
 import java.util.Set;

+import static java.util.Arrays.asList;
+import static org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizer.PushFiltersToSource.canPushToSource;
+import static org.elasticsearch.xpack.esql.optimizer.LocalPhysicalPlanOptimizer.TRANSLATOR_HANDLER;
+import static org.elasticsearch.xpack.ql.util.Queries.Clause.FILTER;
+
 public class PlannerUtils {

     public static Tuple breakPlanBetweenCoordinatorAndDataNode(PhysicalPlan plan, EsqlConfiguration config) {
@@ -72,7 +82,7 @@ public static String[] planOriginalIndices(PhysicalPlan plan) {
         plan.forEachUp(
             FragmentExec.class,
             f -> f.fragment()
-                .forEachUp(EsRelation.class, r -> indices.addAll(Arrays.asList(Strings.commaDelimitedListToStringArray(r.index().name()))))
+                .forEachUp(EsRelation.class, r -> indices.addAll(asList(Strings.commaDelimitedListToStringArray(r.index().name()))))
         );
         return indices.toArray(String[]::new);
     }
@@ -116,8 +126,41 @@ public static PhysicalPlan localPlan(
      * Extracts the ES query provided by the filter parameter
      */
     public static QueryBuilder requestFilter(PhysicalPlan plan) {
-        var filter = new Holder(null);
-        plan.forEachDown(FragmentExec.class, es -> filter.set(es.esFilter()));
-        return filter.get();
+        return detectFilter(plan, "@timestamp");
+    }
+
+    static QueryBuilder detectFilter(PhysicalPlan plan, String fieldName) {
+        // first position is the REST filter, the second the query filter
+        var requestFilter = new QueryBuilder[] { null, null };
+
+        plan.forEachDown(FragmentExec.class, fe -> {
+            requestFilter[0] = fe.esFilter();
+            // detect filter inside the query
+            fe.fragment().forEachUp(Filter.class, f -> {
+                // the only filter that can be pushed down is that on top of the relation
+                // reuses the logic from LocalPhysicalPlanOptimizer#PushFiltersToSource
+                // but gets executed on the logical plan
+                List matches = new ArrayList<>();
+                if (f.child() instanceof EsRelation) {
+                    var conjunctions = Predicates.splitAnd(f.condition());
+                    // look only at expressions that contain literals and the target field
+                    for (var exp : conjunctions) {
+                        var refs = new AttributeSet(exp.references());
+                        // remove literals or attributes that match by name
+                        boolean matchesField = refs.removeIf(e -> fieldName.equals(e.name()));
+                        // the expression only contains the target reference
+                        // and the expression is pushable (functions can be fully translated)
+                        if (matchesField && refs.isEmpty() && canPushToSource(exp)) {
+                            matches.add(exp);
+                        }
+                    }
+                }
+                if (matches.size() > 0) {
+                    requestFilter[1] = TRANSLATOR_HANDLER.asQuery(Predicates.combineAnd(matches)).asBuilder();
+                }
+            });
+        });
+
+        return Queries.combine(FILTER, asList(requestFilter));
     }
 }
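(Aside: a hypothetical illustration of what detectFilter extracts; the ESQL query and the resulting query shape are illustrative only, inferred from the logic above.)

    // For a fragment equivalent to:  FROM logs | WHERE @timestamp >= "2023-08-01" AND status == 500
    // splitAnd() yields two conjuncts; only the first references nothing but "@timestamp"
    // and is translatable, so it alone becomes the shard-level request filter:
    QueryBuilder filter = PlannerUtils.detectFilter(plan, "@timestamp");
    // roughly: bool { filter: [ REST-supplied esFilter (if any), range on "@timestamp" ] }
    // the status == 500 condition stays in the plan and is evaluated by the compute engine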
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java
index f97d95e8a8775..f725e4fe60175 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/ComputeService.java
@@ -140,6 +140,9 @@ public void execute(
             return;
         }
         QueryBuilder requestFilter = PlannerUtils.requestFilter(dataNodePlan);
+
+        LOGGER.info("Sending data node plan\n{}\n with filter [{}]", dataNodePlan, requestFilter);
+
         String[] originalIndices = PlannerUtils.planOriginalIndices(physicalPlan);
         computeTargetNodes(
             rootTask,
@@ -247,16 +250,16 @@ void runCompute(CancellableTask task, ComputeContext context, PhysicalPlan plan,
                 new EsPhysicalOperationProviders(context.searchContexts)
             );

-            LOGGER.info("Received physical plan:\n{}", plan);
+            LOGGER.debug("Received physical plan:\n{}", plan);
             plan = PlannerUtils.localPlan(context.searchContexts, context.configuration, plan);
             LocalExecutionPlanner.LocalExecutionPlan localExecutionPlan = planner.plan(plan);

-            LOGGER.info("Local execution plan:\n{}", localExecutionPlan.describe());
+            LOGGER.debug("Local execution plan:\n{}", localExecutionPlan.describe());
             drivers = localExecutionPlan.createDrivers(context.sessionId);
             if (drivers.isEmpty()) {
                 throw new IllegalStateException("no drivers created");
             }
-            LOGGER.info("using {} drivers", drivers.size());
+            LOGGER.debug("using {} drivers", drivers.size());
         } catch (Exception e) {
             listener.onFailure(e);
             return;
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java
index 0a2dfae30a1fc..d8e5e576386e3 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/DataNodeRequest.java
@@ -57,7 +57,7 @@ final class DataNodeRequest extends TransportRequest implements IndicesRequest {
         super(in);
         this.sessionId = in.readString();
         this.configuration = new EsqlConfiguration(in);
-        this.shardIds = in.readList(ShardId::new);
+        this.shardIds = in.readCollectionAsList(ShardId::new);
         this.aliasFilters = in.readMap(Index::new, AliasFilter::readFrom);
         this.plan = new PlanStreamInput(in, planNameRegistry, in.namedWriteableRegistry(), configuration).readPhysicalPlanNode();
     }
@@ -67,7 +67,7 @@ public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
         out.writeString(sessionId);
         configuration.writeTo(out);
-        out.writeList(shardIds);
+        out.writeCollection(shardIds);
         out.writeMap(aliasFilters);
         new PlanStreamOutput(out, planNameRegistry).writePhysicalPlanNode(plan);
     }
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlStatsResponse.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlStatsResponse.java
index d70416cdbda82..ec75cbfd01da1 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlStatsResponse.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/EsqlStatsResponse.java
@@ -33,12 +33,12 @@ public EsqlStatsResponse(ClusterName clusterName, List nodes,

     @Override
     protected List readNodesFrom(StreamInput in) throws IOException {
-        return in.readList(NodeStatsResponse::readNodeResponse);
+        return in.readCollectionAsList(NodeStatsResponse::readNodeResponse);
     }

     @Override
     protected void writeNodesTo(StreamOutput out, List nodes) throws IOException {
-        out.writeList(nodes);
+        out.writeCollection(nodes);
     }

     @Override
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/QueryPragmas.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/QueryPragmas.java
index a5b8fb601e089..bb1f669dc2b43 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/QueryPragmas.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/plugin/QueryPragmas.java
@@ -59,6 +59,10 @@ public void writeTo(StreamOutput out) throws IOException {
         settings.writeTo(out);
     }

+    public Settings getSettings() {
+        return settings;
+    }
+
     public int exchangeBufferSize() {
         return EXCHANGE_BUFFER_SIZE.get(settings);
     }
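(Aside: readList/writeList were renamed to readCollectionAsList/writeCollection; the pairing stays symmetric. A fragment, assuming `in`/`out` are the usual StreamInput/StreamOutput.)

    out.writeCollection(shardIds);                              // vint count, then each ShardId
    List<ShardId> ids = in.readCollectionAsList(ShardId::new);  // reads them back in order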
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java
index 69e78ab7199e2..b554ccb2920aa 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/querydsl/query/SingleValueQuery.java
@@ -22,6 +22,7 @@
 import org.apache.lucene.search.TwoPhaseIterator;
 import org.apache.lucene.search.Weight;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -165,7 +166,7 @@ protected void doXContent(XContentBuilder builder, Params params) throws IOExcep
     @Override
     public TransportVersion getMinimalSupportedVersion() {
-        return TransportVersion.V_8_500_065; // This is 8.11 - the first version of ESQL
+        return TransportVersions.V_8_500_065; // This is 8.11 - the first version of ESQL
     }

     @Override
diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java
index 3498025fb6502..1cde7857310ae 100644
--- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java
+++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/session/EsqlSession.java
@@ -7,12 +7,13 @@
 package org.elasticsearch.xpack.esql.session;

-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.RefCountingListener;
+import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
 import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.logging.LogManager;
+import org.elasticsearch.logging.Logger;
 import org.elasticsearch.xpack.esql.action.EsqlQueryRequest;
 import org.elasticsearch.xpack.esql.analysis.Analyzer;
 import org.elasticsearch.xpack.esql.analysis.AnalyzerContext;
@@ -25,17 +26,27 @@
 import org.elasticsearch.xpack.esql.optimizer.PhysicalPlanOptimizer;
 import org.elasticsearch.xpack.esql.parser.EsqlParser;
 import org.elasticsearch.xpack.esql.parser.TypedParamValue;
+import org.elasticsearch.xpack.esql.plan.logical.Keep;
+import org.elasticsearch.xpack.esql.plan.logical.RegexExtract;
 import org.elasticsearch.xpack.esql.plan.physical.EstimatesRowSize;
 import org.elasticsearch.xpack.esql.plan.physical.FragmentExec;
 import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan;
 import org.elasticsearch.xpack.esql.planner.Mapper;
 import org.elasticsearch.xpack.ql.analyzer.TableInfo;
+import org.elasticsearch.xpack.ql.expression.Alias;
+import org.elasticsearch.xpack.ql.expression.Attribute;
+import org.elasticsearch.xpack.ql.expression.AttributeSet;
+import org.elasticsearch.xpack.ql.expression.MetadataAttribute;
+import org.elasticsearch.xpack.ql.expression.UnresolvedStar;
 import org.elasticsearch.xpack.ql.expression.function.FunctionRegistry;
 import org.elasticsearch.xpack.ql.index.IndexResolution;
 import org.elasticsearch.xpack.ql.index.IndexResolver;
 import org.elasticsearch.xpack.ql.index.MappingException;
 import org.elasticsearch.xpack.ql.plan.TableIdentifier;
+import org.elasticsearch.xpack.ql.plan.logical.Aggregate;
 import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan;
+import org.elasticsearch.xpack.ql.plan.logical.Project;
+import org.elasticsearch.xpack.ql.util.Holder;

 import java.util.HashSet;
 import java.util.List;
@@ -43,9 +54,11 @@
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.BiFunction;
+import java.util.stream.Collectors;

 import static org.elasticsearch.index.query.QueryBuilders.boolQuery;
 import static org.elasticsearch.xpack.ql.util.ActionListeners.map;
+import static org.elasticsearch.xpack.ql.util.StringUtils.WILDCARD;

 public class EsqlSession {

@@ -159,7 +172,8 @@ private void preAnalyzeIndices(LogicalPlan parsed, ActionListener
 void preAnalyzeIndices(LogicalPlan parsed, ActionListener

+    static Set fieldNames(LogicalPlan parsed) {
+        if (false == parsed.anyMatch(plan -> plan instanceof Aggregate || plan instanceof Project)) {
+            // no explicit columns selection, for example "from employees"
+            return IndexResolver.ALL_FIELDS;
+        }
+
+        Holder projectAll = new Holder<>(false);
+        parsed.forEachExpressionDown(UnresolvedStar.class, us -> { // explicit "*" fields selection
+            if (projectAll.get()) {
+                return;
+            }
+            projectAll.set(true);
+        });
+        if (projectAll.get()) {
+            return IndexResolver.ALL_FIELDS;
+        }
+
+        AttributeSet references = new AttributeSet();
+        // "keep" attributes are special whenever a wildcard is used in their name
+        // i.e. "from test | eval lang = languages + 1 | keep *l" should consider both "languages" and "*l" as valid fields to ask for
+        AttributeSet keepCommandReferences = new AttributeSet();
+
+        parsed.forEachDown(p -> { // go over each plan top-down
+            if (p instanceof RegexExtract re) { // for Grok and Dissect
+                AttributeSet dissectRefs = p.references();
+                // don't add to the list of fields the extracted ones (they are not real fields in mappings)
+                dissectRefs.removeAll(re.extractedFields());
+                references.addAll(dissectRefs);
+                // also remove other down-the-tree references to the extracted fields
+                for (Attribute extracted : re.extractedFields()) {
+                    references.removeIf(attr -> matchByName(attr, extracted.qualifiedName(), false));
+                }
+            } else {
+                references.addAll(p.references());
+                if (p instanceof Keep) {
+                    keepCommandReferences.addAll(p.references());
+                }
+            }
+
+            // remove any already discovered UnresolvedAttributes that are in fact aliases defined later down in the tree
+            // for example "from test | eval x = salary | stats max = max(x) by gender"
+            // remove the UnresolvedAttribute "x", since that is an Alias defined in "eval"
+            p.forEachExpressionDown(Alias.class, alias -> {
+                // do not remove the UnresolvedAttribute that has the same name as its alias, i.e. "rename id = id"
+                // or the UnresolvedAttributes that are used in Functions that have aliases "STATS id = MAX(id)"
+                if (p.references().names().contains(alias.qualifiedName())) {
+                    return;
+                }
+                references.removeIf(attr -> matchByName(attr, alias.qualifiedName(), keepCommandReferences.contains(attr)));
+            });
+        });
+
+        // remove valid metadata attributes because they will be filtered out by the IndexResolver anyway
+        // otherwise, in some edge cases, we will fail to ask for "*" (all fields) instead
+        references.removeIf(a -> a instanceof MetadataAttribute || MetadataAttribute.isSupported(a.qualifiedName()));
+        Set fieldNames = references.names();
+        if (fieldNames.isEmpty()) {
+            return IndexResolver.ALL_FIELDS;
+        } else {
+            fieldNames.addAll(
+                fieldNames.stream().filter(name -> name.endsWith(WILDCARD) == false).map(name -> name + ".*").collect(Collectors.toSet())
+            );
+            return fieldNames;
+        }
+    }
+
+    private static boolean matchByName(Attribute attr, String other, boolean skipIfPattern) {
+        boolean isPattern = Regex.isSimpleMatchPattern(attr.qualifiedName());
+        if (skipIfPattern && isPattern) {
+            return false;
+        }
+        return isPattern ? Regex.simpleMatch(attr.qualifiedName(), other) : attr.qualifiedName().equals(other);
+    }
+
     public void optimizedPlan(LogicalPlan logicalPlan, ActionListener listener) {
         analyzedPlan(logicalPlan, map(listener, p -> {
             var plan = logicalPlanOptimizer.optimize(p);
*/ diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java index dbe915538068a..fa14501edc50d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/CsvTests.java @@ -25,7 +25,6 @@ import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.threadpool.FixedExecutorBuilder; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -132,7 +131,7 @@ * * To log the results logResults() should return "true". */ -@TestLogging(value = "org.elasticsearch.xpack.esql:TRACE,org.elasticsearch.compute:TRACE", reason = "debug") +// @TestLogging(value = "org.elasticsearch.xpack.esql:TRACE,org.elasticsearch.compute:TRACE", reason = "debug") public class CsvTests extends ESTestCase { private static final Logger LOGGER = LogManager.getLogger(CsvTests.class); @@ -233,12 +232,12 @@ private void doTest() throws Exception { var expected = loadCsvSpecValues(testCase.expectedResults); var log = logResults() ? LOGGER : null; - assertResults(expected, actualResults, log); + assertResults(expected, actualResults, testCase.ignoreOrder, log); assertWarnings(actualResults.responseHeaders().getOrDefault("Warning", List.of())); } - protected void assertResults(ExpectedResults expected, ActualResults actual, Logger logger) { - CsvAssert.assertResults(expected, actual, logger); + protected void assertResults(ExpectedResults expected, ActualResults actual, boolean ignoreOrder, Logger logger) { + CsvAssert.assertResults(expected, actual, ignoreOrder, logger); /* * Comment the assertion above and enable the next two lines to see the results returned by ES without any assertions being done. * This is useful when creating a new test or trying to figure out what are the actual results. diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java index 9fb12572fd7ce..dd25148c958d0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/action/EsqlQueryRequestTests.java @@ -23,13 +23,16 @@ import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.esql.parser.TypedParamValue; import java.io.IOException; import java.time.ZoneId; +import java.util.ArrayList; import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.function.Supplier; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -42,33 +45,18 @@ public void testParseFields() throws IOException { ZoneId zoneId = randomZone(); Locale locale = randomLocale(random()); QueryBuilder filter = randomQueryBuilder(); - List params = randomList(5, () -> randomBoolean() ? 
randomInt(100) : randomAlphaOfLength(10)); - StringBuilder paramsString = new StringBuilder(); - paramsString.append("["); - boolean first = true; - for (Object param : params) { - if (first == false) { - paramsString.append(", "); - } - first = false; - if (param instanceof String) { - paramsString.append("\""); - paramsString.append(param); - paramsString.append("\""); - } else { - paramsString.append(param); - } - } - paramsString.append("]"); + + List params = randomParameters(); + boolean hasParams = params.isEmpty() == false; + StringBuilder paramsString = paramsString(params, hasParams); String json = String.format(Locale.ROOT, """ { "query": "%s", "columnar": %s, "time_zone": "%s", "locale": "%s", - "filter": %s, - "params": %s - }""", query, columnar, zoneId, locale.toLanguageTag(), filter, paramsString); + "filter": %s + %s""", query, columnar, zoneId, locale.toLanguageTag(), filter, paramsString); EsqlQueryRequest request = parseEsqlQueryRequest(json); @@ -81,7 +69,7 @@ public void testParseFields() throws IOException { assertEquals(params.size(), request.params().size()); for (int i = 0; i < params.size(); i++) { - assertEquals(params.get(i), request.params().get(i).value); + assertEquals(params.get(i), request.params().get(i)); } } @@ -136,6 +124,65 @@ public void testTask() throws IOException { assertThat(json, equalTo(expected)); } + private List randomParameters() { + if (randomBoolean()) { + return Collections.emptyList(); + } else { + int len = randomIntBetween(1, 10); + List arr = new ArrayList<>(len); + for (int i = 0; i < len; i++) { + boolean hasExplicitType = randomBoolean(); + @SuppressWarnings("unchecked") + Supplier supplier = randomFrom( + () -> new TypedParamValue("boolean", randomBoolean(), hasExplicitType), + () -> new TypedParamValue("integer", randomInt(), hasExplicitType), + () -> new TypedParamValue("long", randomLong(), hasExplicitType), + () -> new TypedParamValue("double", randomDouble(), hasExplicitType), + () -> new TypedParamValue("null", null, hasExplicitType), + () -> new TypedParamValue("keyword", randomAlphaOfLength(10), hasExplicitType) + ); + arr.add(supplier.get()); + } + return Collections.unmodifiableList(arr); + } + } + + private StringBuilder paramsString(List params, boolean hasParams) { + StringBuilder paramsString = new StringBuilder(); + if (hasParams) { + paramsString.append(",\"params\":["); + boolean first = true; + for (TypedParamValue param : params) { + if (first == false) { + paramsString.append(", "); + } + first = false; + if (param.hasExplicitType()) { + paramsString.append("{\"type\":\""); + paramsString.append(param.type); + paramsString.append("\",\"value\":"); + } + switch (param.type) { + case "keyword" -> { + paramsString.append("\""); + paramsString.append(param.value); + paramsString.append("\""); + } + case "integer", "long", "boolean", "null", "double" -> { + paramsString.append(param.value); + } + } + if (param.hasExplicitType()) { + paramsString.append("}"); + } + } + paramsString.append("]}"); + } else { + paramsString.append("}"); + } + return paramsString; + } + private static void assertParserErrorMessage(String json, String message) { Exception e = expectThrows(IllegalArgumentException.class, () -> parseEsqlQueryRequest(json)); assertThat(e.getMessage(), containsString(message)); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java index 76fb5fe57e1e2..0d0f57b02b9fe 
100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/AnalyzerTests.java @@ -212,13 +212,13 @@ public void testProjectStar() { assertProjection(""" from test | keep * - """, "_meta_field", "emp_no", "first_name", "gender", "languages", "last_name", "salary"); + """, "_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "salary"); } public void testNoProjection() { assertProjection(""" from test - """, "_meta_field", "emp_no", "first_name", "gender", "languages", "last_name", "salary"); + """, "_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "salary"); assertProjectionTypes( """ from test @@ -227,6 +227,8 @@ public void testNoProjection() { DataTypes.INTEGER, DataTypes.KEYWORD, DataTypes.TEXT, + DataTypes.TEXT, + DataTypes.KEYWORD, DataTypes.INTEGER, DataTypes.KEYWORD, DataTypes.INTEGER @@ -237,7 +239,7 @@ public void testProjectOrder() { assertProjection(""" from test | keep first_name, *, last_name - """, "first_name", "_meta_field", "emp_no", "gender", "languages", "salary", "last_name"); + """, "first_name", "_meta_field", "emp_no", "gender", "job", "job.raw", "languages", "salary", "last_name"); } public void testProjectThenDropName() { @@ -269,21 +271,21 @@ public void testProjectDropPattern() { from test | keep * | drop *_name - """, "_meta_field", "emp_no", "gender", "languages", "salary"); + """, "_meta_field", "emp_no", "gender", "job", "job.raw", "languages", "salary"); } public void testProjectDropNoStarPattern() { assertProjection(""" from test | drop *_name - """, "_meta_field", "emp_no", "gender", "languages", "salary"); + """, "_meta_field", "emp_no", "gender", "job", "job.raw", "languages", "salary"); } public void testProjectOrderPatternWithRest() { assertProjection(""" from test | keep *name, *, emp_no - """, "first_name", "last_name", "_meta_field", "gender", "languages", "salary", "emp_no"); + """, "first_name", "last_name", "_meta_field", "gender", "job", "job.raw", "languages", "salary", "emp_no"); } public void testProjectDropPatternAndKeepOthers() { @@ -420,7 +422,7 @@ public void testDropPatternUnsupportedFields() { assertProjection(""" from test | drop *ala* - """, "_meta_field", "emp_no", "first_name", "gender", "languages", "last_name"); + """, "_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name"); } public void testDropUnsupportedPattern() { @@ -488,7 +490,7 @@ public void testRenameReuseAlias() { assertProjection(""" from test | rename emp_no as e, first_name as e - """, "_meta_field", "e", "gender", "languages", "last_name", "salary"); + """, "_meta_field", "e", "gender", "job", "job.raw", "languages", "last_name", "salary"); } public void testRenameUnsupportedField() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java index 1dd7661a1b74b..8740b04298c23 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/analysis/VerifierTests.java @@ -250,6 +250,25 @@ public void testPeriodAndDurationInRowAssignment() { } } + public void testSubtractDateTimeFromTemporal() { + for (var unit : List.of("millisecond", "second", "minute", "hour")) { + assertEquals( + "1:5: [-] 
arguments are in unsupported order: cannot subtract a [DATETIME] value [now()] from a [TIME_DURATION] amount [1 " + + unit + + "]", + error("row 1 " + unit + " - now() ") + ); + } + for (var unit : List.of("day", "week", "month", "year")) { + assertEquals( + "1:5: [-] arguments are in unsupported order: cannot subtract a [DATETIME] value [now()] from a [DATE_PERIOD] amount [1 " + + unit + + "]", + error("row 1 " + unit + " - now() ") + ); + } + } + private String error(String query) { return error(query, defaultAnalyzer); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java index e6dce24503755..a1a5e1aa60d19 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/AbstractFunctionTestCase.java @@ -33,21 +33,17 @@ import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; import org.elasticsearch.xpack.ql.type.EsField; -import org.elasticsearch.xpack.ql.util.NumericUtils; import org.elasticsearch.xpack.versionfield.Version; -import org.hamcrest.Matcher; import org.junit.After; import org.junit.AfterClass; import org.junit.BeforeClass; import java.io.IOException; -import java.math.BigInteger; import java.nio.file.Files; import java.nio.file.Path; import java.time.Duration; import java.time.Period; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -59,12 +55,6 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; -import java.util.function.DoubleBinaryOperator; -import java.util.function.DoubleFunction; -import java.util.function.DoubleUnaryOperator; -import java.util.function.Function; -import java.util.function.IntFunction; -import java.util.function.LongFunction; import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -81,364 +71,6 @@ */ public abstract class AbstractFunctionTestCase extends ESTestCase { - /** - * Holds a data value and the intended parse type of that value - * @param data - value to test against - * @param type - type of the value, for building expressions - */ - public record TypedData(Object data, DataType type, String name) { - public TypedData(Object data, String name) { - this(data, EsqlDataTypes.fromJava(data), name); - } - } - - public static class TestCase { - /** - * The {@link Source} this test case should be run with - */ - private Source source; - /** - * The parameter values and types to pass into the function for this test run - */ - private List data; - - /** - * The expected toString output for the evaluator this fuction invocation should generate - */ - String evaluatorToString; - /** - * The expected output type for the case being tested - */ - DataType expectedType; - /** - * A matcher to validate the output of the function run on the given input data - */ - private Matcher matcher; - - /** - * Warnings this test is expected to produce - */ - private String[] expectedWarnings; - - private final String expectedTypeError; - - public TestCase(List data, String evaluatorToString, DataType expectedType, Matcher matcher) { - this(data, evaluatorToString, expectedType, matcher, 
null, null); - } - - public static TestCase typeError(List data, String expectedTypeError) { - return new TestCase(data, null, null, null, null, expectedTypeError); - } - - private TestCase( - List data, - String evaluatorToString, - DataType expectedType, - Matcher matcher, - String[] expectedWarnings, - String expectedTypeError - ) { - this.source = Source.EMPTY; - this.data = data; - this.evaluatorToString = evaluatorToString; - this.expectedType = expectedType; - this.matcher = matcher; - this.expectedWarnings = expectedWarnings; - this.expectedTypeError = expectedTypeError; - } - - public Source getSource() { - return source; - } - - public List getData() { - return data; - } - - public List getDataAsFields() { - return data.stream().map(t -> field(t.name(), t.type())).collect(Collectors.toList()); - } - - public List getDataAsLiterals() { - return data.stream().map(t -> new Literal(Source.synthetic(t.name()), t.data(), t.type())).collect(Collectors.toList()); - } - - public List getDataValues() { - return data.stream().map(t -> t.data()).collect(Collectors.toList()); - } - - public Matcher getMatcher() { - return matcher; - } - - public TestCase withWarning(String warning) { - String[] newWarnings; - if (expectedWarnings != null) { - newWarnings = Arrays.copyOf(expectedWarnings, expectedWarnings.length + 1); - newWarnings[expectedWarnings.length] = warning; - } else { - newWarnings = new String[] { warning }; - } - return new TestCase(data, evaluatorToString, expectedType, matcher, newWarnings, expectedTypeError); - } - } - - /** - * This class exists to give a human-readable string representation of the test case. - */ - public record TestCaseSupplier(String name, List types, Supplier supplier) implements Supplier { - /** - * Build a test case without types. - * @deprecated Supply types - */ - @Deprecated - public TestCaseSupplier(String name, Supplier supplier) { - this(name, null, supplier); - } - - /** - * Build a test case named after the types it takes. - */ - public TestCaseSupplier(List types, Supplier supplier) { - this(nameFromTypes(types), types, supplier); - } - - static String nameFromTypes(List types) { - return types.stream().map(t -> "<" + t.typeName() + ">").collect(Collectors.joining(", ")); - } - - @Override - public TestCase get() { - TestCase supplied = supplier.get(); - if (types != null) { - for (int i = 0; i < types.size(); i++) { - if (supplied.data.get(i).type != types.get(i)) { - throw new IllegalStateException("supplier/data type mismatch " + supplied.data.get(i).type + "/" + types.get(i)); - } - } - } - return supplied; - } - - @Override - public String toString() { - return name; - } - - /** - * Generate positive test cases for unary functions that operate on an {@code numeric} - * fields by casting them to {@link DataTypes#DOUBLE}s. 
- */ - public static List forUnaryCastingToDouble(String name, String argName, DoubleUnaryOperator expected) { - String read = "Attribute[channel=0]"; - String eval = name + "[" + argName + "="; - List suppliers = new ArrayList<>(); - forUnaryInt( - suppliers, - eval + castToDoubleEvaluator(read, DataTypes.INTEGER) + "]", - DataTypes.DOUBLE, - i -> expected.applyAsDouble(i) - ); - forUnaryLong( - suppliers, - eval + castToDoubleEvaluator(read, DataTypes.LONG) + "]", - DataTypes.DOUBLE, - l -> expected.applyAsDouble(l) - ); - forUnaryUnsignedLong( - suppliers, - eval + castToDoubleEvaluator(read, DataTypes.UNSIGNED_LONG) + "]", - DataTypes.DOUBLE, - ul -> expected.applyAsDouble(ul.doubleValue()) - ); - forUnaryDouble(suppliers, eval + read + "]", DataTypes.DOUBLE, i -> expected.applyAsDouble(i)); - return suppliers; - } - - /** - * Generate positive test cases for binary functions that operate on an {@code numeric} - * fields by casting them to {@link DataTypes#DOUBLE}s. - */ - public static List forBinaryCastingToDouble( - String name, - String lhsName, - String rhsName, - DoubleBinaryOperator expected - ) { - List suppliers = new ArrayList<>(); - for (DataType lhsType : EsqlDataTypes.types()) { - if (lhsType.isNumeric() == false || EsqlDataTypes.isRepresentable(lhsType) == false) { - continue; - } - for (Map.Entry> lhsSupplier : RANDOM_VALUE_SUPPLIERS.get(lhsType)) { - for (DataType rhsType : EsqlDataTypes.types()) { - if (rhsType.isNumeric() == false || EsqlDataTypes.isRepresentable(rhsType) == false) { - continue; - } - for (Map.Entry> rhsSupplier : RANDOM_VALUE_SUPPLIERS.get(rhsType)) { - String caseName = lhsSupplier.getKey() + ", " + rhsSupplier.getKey(); - suppliers.add(new TestCaseSupplier(caseName, List.of(lhsType, rhsType), () -> { - Number lhs = (Number) lhsSupplier.getValue().get(); - Number rhs = (Number) rhsSupplier.getValue().get(); - TypedData lhsTyped = new TypedData( - // TODO there has to be a better way to handle unsigned long - lhs instanceof BigInteger b ? NumericUtils.asLongUnsigned(b) : lhs, - lhsType, - "lhs" - ); - TypedData rhsTyped = new TypedData( - rhs instanceof BigInteger b ? NumericUtils.asLongUnsigned(b) : rhs, - rhsType, - "rhs" - ); - String lhsEvalName = castToDoubleEvaluator("Attribute[channel=0]", lhsType); - String rhsEvalName = castToDoubleEvaluator("Attribute[channel=1]", rhsType); - return new TestCase( - List.of(lhsTyped, rhsTyped), - name + "[" + lhsName + "=" + lhsEvalName + ", " + rhsName + "=" + rhsEvalName + "]", - DataTypes.DOUBLE, - equalTo(expected.applyAsDouble(lhs.doubleValue(), rhs.doubleValue())) - ); - })); - } - } - } - } - return suppliers; - } - - /** - * Generate positive test cases for a unary function operating on an {@link DataTypes#INTEGER}. - */ - public static void forUnaryInt( - List suppliers, - String expectedEvaluatorToString, - DataType expectedType, - IntFunction expectedValue - ) { - unaryNumeric(suppliers, expectedEvaluatorToString, DataTypes.INTEGER, expectedType, n -> expectedValue.apply(n.intValue())); - } - - /** - * Generate positive test cases for a unary function operating on an {@link DataTypes#LONG}. - */ - public static void forUnaryLong( - List suppliers, - String expectedEvaluatorToString, - DataType expectedType, - LongFunction expectedValue - ) { - unaryNumeric(suppliers, expectedEvaluatorToString, DataTypes.LONG, expectedType, n -> expectedValue.apply(n.longValue())); - } - - /** - * Generate positive test cases for a unary function operating on an {@link DataTypes#UNSIGNED_LONG}. 
- */ - public static void forUnaryUnsignedLong( - List suppliers, - String expectedEvaluatorToString, - DataType expectedType, - Function expectedValue - ) { - unaryNumeric( - suppliers, - expectedEvaluatorToString, - DataTypes.UNSIGNED_LONG, - expectedType, - n -> expectedValue.apply((BigInteger) n) - ); - } - - /** - * Generate positive test cases for a unary function operating on an {@link DataTypes#DOUBLE}. - */ - public static void forUnaryDouble( - List suppliers, - String expectedEvaluatorToString, - DataType expectedType, - DoubleFunction expectedValue - ) { - unaryNumeric(suppliers, expectedEvaluatorToString, DataTypes.DOUBLE, expectedType, n -> expectedValue.apply(n.doubleValue())); - } - - private static void unaryNumeric( - List suppliers, - String expectedEvaluatorToString, - DataType inputType, - DataType expectedOutputType, - Function expected - ) { - for (Map.Entry> supplier : RANDOM_VALUE_SUPPLIERS.get(inputType)) { - suppliers.add(new TestCaseSupplier(supplier.getKey(), List.of(inputType), () -> { - Number value = (Number) supplier.getValue().get(); - TypedData typed = new TypedData( - // TODO there has to be a better way to handle unsigned long - value instanceof BigInteger b ? NumericUtils.asLongUnsigned(b) : value, - inputType, - "value" - ); - return new TestCase(List.of(typed), expectedEvaluatorToString, expectedOutputType, equalTo(expected.apply(value))); - })); - } - } - - private static final Map>>> RANDOM_VALUE_SUPPLIERS = Map.ofEntries( - Map.entry( - DataTypes.DOUBLE, - List.of( - Map.entry("<0 double>", () -> 0.0d), - Map.entry("", () -> randomDouble()), - Map.entry("", () -> -randomDouble()), - Map.entry("", () -> randomDoubleBetween(0, Double.MAX_VALUE, false)), - Map.entry("", () -> randomDoubleBetween(Double.MIN_VALUE, 0 - Double.MIN_NORMAL, true)) - ) - ), - Map.entry( - DataTypes.LONG, - List.of( - Map.entry("<0 long>", () -> 0L), - Map.entry("", () -> randomLongBetween(1, Long.MAX_VALUE)), - Map.entry("", () -> randomLongBetween(Long.MIN_VALUE, -1)) - ) - ), - Map.entry( - DataTypes.INTEGER, - List.of( - Map.entry("<0 int>", () -> 0), - Map.entry("", () -> between(1, Integer.MAX_VALUE)), - Map.entry("", () -> between(Integer.MIN_VALUE, -1)) - ) - ), - Map.entry( - DataTypes.UNSIGNED_LONG, - List.of( - Map.entry("<0 unsigned long>", () -> BigInteger.ZERO), - Map.entry("", () -> BigInteger.valueOf(randomLongBetween(1, Integer.MAX_VALUE))), - Map.entry( - "", - () -> BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.valueOf(randomLongBetween(1, Integer.MAX_VALUE))) - ) - ) - ) - ); - - private static String castToDoubleEvaluator(String original, DataType current) { - if (current == DataTypes.DOUBLE) { - return original; - } - if (current == DataTypes.INTEGER) { - return "CastIntToDoubleEvaluator[v=" + original + "]"; - } - if (current == DataTypes.LONG) { - return "CastLongToDoubleEvaluator[v=" + original + "]"; - } - if (current == DataTypes.UNSIGNED_LONG) { - return "CastUnsignedLongToDoubleEvaluator[v=" + original + "]"; - } - throw new UnsupportedOperationException(); - } - } - /** * Generate a random value of the appropriate type to fit into blocks of {@code e}. */ @@ -464,7 +96,7 @@ public static Literal randomLiteral(DataType type) { }, type); } - protected TestCase testCase; + protected TestCaseSupplier.TestCase testCase; protected static Iterable parameterSuppliersFromTypedData(List cases) { // TODO rename this method to something more descriptive. Javadoc. And make sure all parameters are "representable" types. 
@@ -488,11 +120,11 @@ protected static FieldAttribute field(String name, DataType type) { */ protected abstract Expression build(Source source, List args); - protected final Expression buildFieldExpression(TestCase testCase) { + protected final Expression buildFieldExpression(TestCaseSupplier.TestCase testCase) { return build(testCase.getSource(), testCase.getDataAsFields()); } - protected final Expression buildLiteralExpression(TestCase testCase) { + protected final Expression buildLiteralExpression(TestCaseSupplier.TestCase testCase) { return build(testCase.getSource(), testCase.getDataAsLiterals()); } @@ -517,7 +149,7 @@ protected final Page row(List values) { */ protected void buildLayout(Layout.Builder builder, Expression e) { if (e instanceof FieldAttribute f) { - builder.appendChannel(f.id()); + builder.append(f); return; } for (Expression c : e.children()) { @@ -531,10 +163,11 @@ protected final void assertResolveTypeValid(Expression expression, DataType expe } public final void testEvaluate() { + assumeTrue("All test data types must be representable in order to build fields", testCase.allTypesAreRepresentable()); Expression expression = buildFieldExpression(testCase); - if (testCase.expectedTypeError != null) { + if (testCase.getExpectedTypeError() != null) { assertTrue("expected unresolved", expression.typeResolved().unresolved()); - assertThat(expression.typeResolved().message(), equalTo(testCase.expectedTypeError)); + assertThat(expression.typeResolved().message(), equalTo(testCase.getExpectedTypeError())); return; } assertFalse("expected resolved", expression.typeResolved().unresolved()); @@ -543,13 +176,14 @@ public final void testEvaluate() { // TODO should we convert unsigned_long into BigDecimal so it's easier to assert? Object result = toJavaObject(evaluator(expression).get().eval(row(testCase.getDataValues())), 0); assertThat(result, testCase.getMatcher()); - if (testCase.expectedWarnings != null) { - assertWarnings(testCase.expectedWarnings); + if (testCase.getExpectedWarnings() != null) { + assertWarnings(testCase.getExpectedWarnings()); } } public final void testSimpleWithNulls() { // TODO replace this with nulls inserted into the test case like anyNullIsNull - assumeTrue("nothing to do if a type error", testCase.expectedTypeError == null); + assumeTrue("nothing to do if a type error", testCase.getExpectedTypeError() == null); + assumeTrue("All test data types must be representable in order to build fields", testCase.allTypesAreRepresentable()); List simpleData = testCase.getDataValues(); EvalOperator.ExpressionEvaluator eval = evaluator(buildFieldExpression(testCase)).get(); Block[] orig = BlockUtils.fromListRow(simpleData); @@ -575,7 +209,8 @@ protected void assertSimpleWithNulls(List data, Block value, int nullBlo } public final void testEvaluateInManyThreads() throws ExecutionException, InterruptedException { - assumeTrue("nothing to do if a type error", testCase.expectedTypeError == null); + assumeTrue("nothing to do if a type error", testCase.getExpectedTypeError() == null); + assumeTrue("All test data types must be representable in order to build fields", testCase.allTypesAreRepresentable()); int count = 10_000; int threads = 5; Supplier evalSupplier = evaluator(buildFieldExpression(testCase)); @@ -602,7 +237,8 @@ public final void testEvaluateInManyThreads() throws ExecutionException, Interru } public final void testEvaluatorToString() { - assumeTrue("nothing to do if a type error", testCase.expectedTypeError == null); + assumeTrue("nothing to do if a type 
error", testCase.getExpectedTypeError() == null); + assumeTrue("All test data types must be representable in order to build fields", testCase.allTypesAreRepresentable()); var supplier = evaluator(buildFieldExpression(testCase)); var ev = supplier.get(); assertThat(ev.toString(), equalTo(testCase.evaluatorToString)); @@ -610,9 +246,9 @@ public final void testEvaluatorToString() { public final void testFold() { Expression expression = buildLiteralExpression(testCase); - if (testCase.expectedTypeError != null) { + if (testCase.getExpectedTypeError() != null) { assertTrue(expression.typeResolved().unresolved()); - assertThat(expression.typeResolved().message(), equalTo(testCase.expectedTypeError)); + assertThat(expression.typeResolved().message(), equalTo(testCase.getExpectedTypeError())); return; } assertFalse(expression.typeResolved().unresolved()); @@ -620,12 +256,13 @@ public final void testFold() { assertThat(expression.dataType(), equalTo(testCase.expectedType)); assertTrue(expression.foldable()); assertThat(expression.fold(), testCase.getMatcher()); - if (testCase.expectedWarnings != null) { - assertWarnings(testCase.expectedWarnings); + if (testCase.getExpectedWarnings() != null) { + assertWarnings(testCase.getExpectedWarnings()); } } public void testSerializationOfSimple() { + assumeTrue("All test data types must be representable in order to build fields", testCase.allTypesAreRepresentable()); assertSerialization(buildFieldExpression(testCase)); } @@ -643,7 +280,7 @@ public void testSerializationOfSimple() { */ protected static List anyNullIsNull(boolean entirelyNullPreservesType, List testCaseSuppliers) { for (TestCaseSupplier s : testCaseSuppliers) { - if (s.types == null) { + if (s.types() == null) { throw new IllegalArgumentException("types required"); } } @@ -661,50 +298,50 @@ protected static List anyNullIsNull(boolean entirelyNullPreser */ Set> uniqueSignatures = new HashSet<>(); for (TestCaseSupplier original : testCaseSuppliers) { - boolean firstTimeSeenSignature = uniqueSignatures.add(original.types); - for (int nullPosition = 0; nullPosition < original.types.size(); nullPosition++) { + boolean firstTimeSeenSignature = uniqueSignatures.add(original.types()); + for (int nullPosition = 0; nullPosition < original.types().size(); nullPosition++) { int finalNullPosition = nullPosition; - suppliers.add(new TestCaseSupplier(original.name + " null in " + nullPosition, original.types, () -> { - TestCase oc = original.get(); - List data = IntStream.range(0, oc.data.size()).mapToObj(i -> { - TypedData od = oc.data.get(i); + suppliers.add(new TestCaseSupplier(original.name() + " null in " + nullPosition, original.types(), () -> { + TestCaseSupplier.TestCase oc = original.get(); + List data = IntStream.range(0, oc.getData().size()).mapToObj(i -> { + TestCaseSupplier.TypedData od = oc.getData().get(i); if (i == finalNullPosition) { - return new TypedData(null, od.type, od.name); + return new TestCaseSupplier.TypedData(null, od.type(), od.name()); } return od; }).toList(); - return new TestCase( + return new TestCaseSupplier.TestCase( data, oc.evaluatorToString, oc.expectedType, nullValue(), - oc.expectedWarnings, - oc.expectedTypeError + oc.getExpectedWarnings(), + oc.getExpectedTypeError() ); })); if (firstTimeSeenSignature) { - List typesWithNull = IntStream.range(0, original.types.size()) - .mapToObj(i -> i == finalNullPosition ? DataTypes.NULL : original.types.get(i)) + List typesWithNull = IntStream.range(0, original.types().size()) + .mapToObj(i -> i == finalNullPosition ? 
DataTypes.NULL : original.types().get(i)) .toList(); boolean newSignature = uniqueSignatures.add(typesWithNull); if (newSignature) { suppliers.add(new TestCaseSupplier(typesWithNull, () -> { - TestCase oc = original.get(); - List data = IntStream.range(0, oc.data.size()).mapToObj(i -> { - TypedData od = oc.data.get(i); + TestCaseSupplier.TestCase oc = original.get(); + List data = IntStream.range(0, oc.getData().size()).mapToObj(i -> { + TestCaseSupplier.TypedData od = oc.getData().get(i); if (i == finalNullPosition) { - return new TypedData(null, DataTypes.NULL, od.name); + return new TestCaseSupplier.TypedData(null, DataTypes.NULL, od.name()); } return od; }).toList(); - return new TestCase( + return new TestCaseSupplier.TestCase( data, "LiteralsEvaluator[block=null]", - entirelyNullPreservesType == false && oc.data.size() == 1 ? DataTypes.NULL : oc.expectedType, + entirelyNullPreservesType == false && oc.getData().size() == 1 ? DataTypes.NULL : oc.expectedType, nullValue(), - oc.expectedWarnings, - oc.expectedTypeError + oc.getExpectedWarnings(), + oc.getExpectedTypeError() ); })); } @@ -722,7 +359,7 @@ protected static List anyNullIsNull(boolean entirelyNullPreser */ protected static List errorsForCasesWithoutExamples(List testCaseSuppliers) { for (TestCaseSupplier s : testCaseSuppliers) { - if (s.types == null) { + if (s.types() == null) { throw new IllegalArgumentException("types required"); } } @@ -793,8 +430,8 @@ private static TestCaseSupplier typeErrorSupplier(List> validPerPo return new TestCaseSupplier( "type error for " + TestCaseSupplier.nameFromTypes(types), types, - () -> TestCase.typeError( - types.stream().map(type -> new TypedData(randomLiteral(type).value(), type, type.typeName())).toList(), + () -> TestCaseSupplier.TestCase.typeError( + types.stream().map(type -> new TestCaseSupplier.TypedData(randomLiteral(type).value(), type, type.typeName())).toList(), typeErrorMessage(validPerPosition, types) ) ); @@ -847,6 +484,9 @@ private static Stream representable() { @AfterClass public static void renderSignature() throws IOException { + if (System.getProperty("generateDocs") == null) { + return; + } FunctionDefinition definition = definition(); if (definition == null) { LogManager.getLogger(getTestClass()).info("Skipping rendering signature because the function isn't registered"); @@ -882,17 +522,20 @@ public static void clearSignatures() { @After public void trackSignature() { - if (testCase.expectedTypeError != null) { + if (testCase.getExpectedTypeError() != null) { return; } - if (testCase.getData().stream().anyMatch(t -> t.type == DataTypes.NULL)) { + if (testCase.getData().stream().anyMatch(t -> t.type() == DataTypes.NULL)) { return; } - signatures.putIfAbsent(testCase.getData().stream().map(TypedData::type).toList(), testCase.expectedType); + signatures.putIfAbsent(testCase.getData().stream().map(TestCaseSupplier.TypedData::type).toList(), testCase.expectedType); } @AfterClass public static void renderTypesTable() throws IOException { + if (System.getProperty("generateDocs") == null) { + return; + } FunctionDefinition definition = definition(); if (definition == null) { LogManager.getLogger(getTestClass()).info("Skipping rendering types because the function isn't registered"); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java index e1d9c7bfed1f5..06ff4e4d80fe2 100644 --- 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/RailRoadDiagram.java
@@ -19,6 +19,7 @@
 import net.nextencia.rrdiagram.grammar.rrdiagram.RRElement;
 import net.nextencia.rrdiagram.grammar.rrdiagram.RRText;
 
+import org.elasticsearch.common.util.LazyInitializable;
 import org.elasticsearch.xpack.esql.plan.logical.show.ShowFunctions;
 import org.elasticsearch.xpack.ql.expression.function.FunctionDefinition;
 
@@ -40,9 +41,9 @@ public class RailRoadDiagram {
      * on whatever fonts you have installed. And, since the world can't agree
      * on fonts, that'd be chaos. So, instead, we load Roboto Mono.
      */
-    private static final Font FONT = loadFont().deriveFont(20.0F);
+    private static final LazyInitializable<Font, IOException> FONT = new LazyInitializable<>(() -> loadFont().deriveFont(20.0F));
 
-    static String functionSignature(FunctionDefinition definition) {
+    static String functionSignature(FunctionDefinition definition) throws IOException {
         List<Expression> expressions = new ArrayList<>();
         expressions.add(new SpecialSequence(definition.name().toUpperCase(Locale.ROOT)));
         expressions.add(new Syntax("("));
@@ -68,12 +69,12 @@ static String functionSignature(FunctionDefinition definition) {
 
         RRDiagramToSVG toSvg = new RRDiagramToSVG();
         toSvg.setSpecialSequenceShape(RRDiagramToSVG.BoxShape.RECTANGLE);
-        toSvg.setSpecialSequenceFont(FONT);
+        toSvg.setSpecialSequenceFont(FONT.getOrCompute());
 
         toSvg.setLiteralFillColor(toSvg.getSpecialSequenceFillColor());
-        toSvg.setLiteralFont(FONT);
+        toSvg.setLiteralFont(FONT.getOrCompute());
 
-        toSvg.setRuleFont(FONT);
+        toSvg.setRuleFont(FONT.getOrCompute());
         /*
          * "Tighten" the styles in the SVG so they beat the styles sitting in the
          * main page. We need this because we're embedding the SVG into the page.
@@ -143,7 +144,7 @@ public void addElement(String element) {
         }
     }
 
-    private static Font loadFont() {
+    private static Font loadFont() throws IOException {
         try {
             InputStream woff = RailRoadDiagram.class.getClassLoader()
                 .getResourceAsStream("META-INF/resources/webjars/fontsource__roboto-mono/4.5.7/files/roboto-mono-latin-400-normal.woff");
@@ -152,9 +153,7 @@
             }
             return Font.createFont(Font.TRUETYPE_FONT, new WoffConverter().convertToTTFOutputStream(woff));
         } catch (FontFormatException e) {
-            throw new RuntimeException(e);
-        } catch (IOException e) {
-            throw new RuntimeException(e);
+            throw new IOException(e);
         }
     }
 }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java
new file mode 100644
index 0000000000000..f564cadff3477
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/TestCaseSupplier.java
@@ -0,0 +1,590 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function;
+
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.expression.Literal;
+import org.elasticsearch.xpack.ql.tree.Source;
+import org.elasticsearch.xpack.ql.type.DataType;
+import org.elasticsearch.xpack.ql.type.DataTypes;
+import org.elasticsearch.xpack.ql.util.NumericUtils;
+import org.hamcrest.Matcher;
+
+import java.math.BigInteger;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Map;
+import java.util.function.DoubleBinaryOperator;
+import java.util.function.DoubleFunction;
+import java.util.function.DoubleUnaryOperator;
+import java.util.function.Function;
+import java.util.function.IntFunction;
+import java.util.function.LongFunction;
+import java.util.function.Supplier;
+import java.util.stream.Collectors;
+
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * This class exists to give a human-readable string representation of the test case.
+ */
+public record TestCaseSupplier(String name, List<DataType> types, Supplier<TestCaseSupplier.TestCase> supplier)
+    implements
+        Supplier<TestCaseSupplier.TestCase> {
+
+    // 2^64 - 1, the largest value an unsigned long can hold
+    public static final BigInteger MAX_UNSIGNED_LONG = BigInteger.ONE.shiftLeft(64).subtract(BigInteger.ONE);
+    /**
+     * Build a test case without types.
+     *
+     * @deprecated Supply types
+     */
+    @Deprecated
+    public TestCaseSupplier(String name, Supplier<TestCase> supplier) {
+        this(name, null, supplier);
+    }
+
+    /**
+     * Build a test case named after the types it takes.
+     */
+    public TestCaseSupplier(List<DataType> types, Supplier<TestCase> supplier) {
+        this(nameFromTypes(types), types, supplier);
+    }
+
+    static String nameFromTypes(List<DataType> types) {
+        return types.stream().map(t -> "<" + t.typeName() + ">").collect(Collectors.joining(", "));
+    }
+
+    @Override
+    public TestCase get() {
+        TestCase supplied = supplier.get();
+        if (types != null) {
+            for (int i = 0; i < types.size(); i++) {
+                if (supplied.getData().get(i).type() != types.get(i)) {
+                    throw new IllegalStateException("supplier/data type mismatch " + supplied.getData().get(i).type() + "/" + types.get(i));
+                }
+            }
+        }
+        return supplied;
+    }
+
+    @Override
+    public String toString() {
+        return name;
+    }
+
+    /**
+     * Generate positive test cases for unary functions that operate on {@code numeric}
+     * fields by casting them to {@link DataTypes#DOUBLE}s.
+ */
+    public static List<TestCaseSupplier> forUnaryCastingToDouble(String name, String argName, DoubleUnaryOperator expected) {
+        String read = "Attribute[channel=0]";
+        String eval = name + "[" + argName + "=";
+        List<TestCaseSupplier> suppliers = new ArrayList<>();
+        forUnaryInt(
+            suppliers,
+            eval + castToDoubleEvaluator(read, DataTypes.INTEGER) + "]",
+            DataTypes.DOUBLE,
+            i -> expected.applyAsDouble(i),
+            Integer.MIN_VALUE,
+            Integer.MAX_VALUE
+        );
+        forUnaryLong(
+            suppliers,
+            eval + castToDoubleEvaluator(read, DataTypes.LONG) + "]",
+            DataTypes.DOUBLE,
+            l -> expected.applyAsDouble(l),
+            Long.MIN_VALUE,
+            Long.MAX_VALUE
+        );
+        forUnaryUnsignedLong(
+            suppliers,
+            eval + castToDoubleEvaluator(read, DataTypes.UNSIGNED_LONG) + "]",
+            DataTypes.DOUBLE,
+            ul -> expected.applyAsDouble(ul.doubleValue()),
+            BigInteger.ZERO,
+            MAX_UNSIGNED_LONG
+        );
+        forUnaryDouble(
+            suppliers,
+            eval + read + "]",
+            DataTypes.DOUBLE,
+            i -> expected.applyAsDouble(i),
+            Double.NEGATIVE_INFINITY,
+            Double.POSITIVE_INFINITY
+        );
+        return suppliers;
+    }
+
+    /**
+     * Generate positive test cases for binary functions that operate on {@code numeric}
+     * fields by casting them to {@link DataTypes#DOUBLE}s.
+     */
+    public static List<TestCaseSupplier> forBinaryCastingToDouble(
+        String name,
+        String lhsName,
+        String rhsName,
+        DoubleBinaryOperator expected
+    ) {
+        List<TestCaseSupplier> suppliers = new ArrayList<>();
+        for (DataType lhsType : EsqlDataTypes.types()) {
+            if (lhsType.isNumeric() == false || EsqlDataTypes.isRepresentable(lhsType) == false) {
+                continue;
+            }
+            for (Map.Entry<String, Supplier<Object>> lhsSupplier : RANDOM_VALUE_SUPPLIERS.get(lhsType)) {
+                for (DataType rhsType : EsqlDataTypes.types()) {
+                    if (rhsType.isNumeric() == false || EsqlDataTypes.isRepresentable(rhsType) == false) {
+                        continue;
+                    }
+                    for (Map.Entry<String, Supplier<Object>> rhsSupplier : RANDOM_VALUE_SUPPLIERS.get(rhsType)) {
+                        String caseName = lhsSupplier.getKey() + ", " + rhsSupplier.getKey();
+                        suppliers.add(new TestCaseSupplier(caseName, List.of(lhsType, rhsType), () -> {
+                            Number lhs = (Number) lhsSupplier.getValue().get();
+                            Number rhs = (Number) rhsSupplier.getValue().get();
+                            TypedData lhsTyped = new TypedData(
+                                // TODO there has to be a better way to handle unsigned long
+                                lhs instanceof BigInteger b ? NumericUtils.asLongUnsigned(b) : lhs,
+                                lhsType,
+                                "lhs"
+                            );
+                            TypedData rhsTyped = new TypedData(
+                                rhs instanceof BigInteger b ? NumericUtils.asLongUnsigned(b) : rhs,
+                                rhsType,
+                                "rhs"
+                            );
+                            String lhsEvalName = castToDoubleEvaluator("Attribute[channel=0]", lhsType);
+                            String rhsEvalName = castToDoubleEvaluator("Attribute[channel=1]", rhsType);
+                            return new TestCase(
+                                List.of(lhsTyped, rhsTyped),
+                                name + "[" + lhsName + "=" + lhsEvalName + ", " + rhsName + "=" + rhsEvalName + "]",
+                                DataTypes.DOUBLE,
+                                equalTo(expected.applyAsDouble(lhs.doubleValue(), rhs.doubleValue()))
+                            );
+                        }));
+                    }
+                }
+            }
+        }
+        return suppliers;
+    }
+
+    /**
+     * Generate positive test cases for a unary function operating on an {@link DataTypes#INTEGER}.
+     */
+    public static void forUnaryInt(
+        List<TestCaseSupplier> suppliers,
+        String expectedEvaluatorToString,
+        DataType expectedType,
+        IntFunction<Object> expectedValue,
+        int lowerBound,
+        int upperBound
+    ) {
+        unaryNumeric(
+            suppliers,
+            expectedEvaluatorToString,
+            DataTypes.INTEGER,
+            intCases(lowerBound, upperBound),
+            expectedType,
+            n -> expectedValue.apply(n.intValue())
+        );
+    }
+
+    /**
+     * Generate positive test cases for a unary function operating on an {@link DataTypes#LONG}.
+ */
+    public static void forUnaryLong(
+        List<TestCaseSupplier> suppliers,
+        String expectedEvaluatorToString,
+        DataType expectedType,
+        LongFunction<Object> expectedValue,
+        long lowerBound,
+        long upperBound
+    ) {
+        unaryNumeric(
+            suppliers,
+            expectedEvaluatorToString,
+            DataTypes.LONG,
+            longCases(lowerBound, upperBound),
+            expectedType,
+            n -> expectedValue.apply(n.longValue())
+        );
+    }
+
+    /**
+     * Generate positive test cases for a unary function operating on an {@link DataTypes#UNSIGNED_LONG}.
+     */
+    public static void forUnaryUnsignedLong(
+        List<TestCaseSupplier> suppliers,
+        String expectedEvaluatorToString,
+        DataType expectedType,
+        Function<BigInteger, Object> expectedValue,
+        BigInteger lowerBound,
+        BigInteger upperBound
+    ) {
+        unaryNumeric(
+            suppliers,
+            expectedEvaluatorToString,
+            DataTypes.UNSIGNED_LONG,
+            ulongCases(lowerBound, upperBound),
+            expectedType,
+            n -> expectedValue.apply((BigInteger) n)
+        );
+    }
+
+    /**
+     * Generate positive test cases for a unary function operating on an {@link DataTypes#DOUBLE}.
+     */
+    public static void forUnaryDouble(
+        List<TestCaseSupplier> suppliers,
+        String expectedEvaluatorToString,
+        DataType expectedType,
+        DoubleFunction<Object> expectedValue,
+        double lowerBound,
+        double upperBound
+    ) {
+        unaryNumeric(
+            suppliers,
+            expectedEvaluatorToString,
+            DataTypes.DOUBLE,
+            doubleCases(lowerBound, upperBound),
+            expectedType,
+            n -> expectedValue.apply(n.doubleValue())
+        );
+    }
+
+    private static void unaryNumeric(
+        List<TestCaseSupplier> suppliers,
+        String expectedEvaluatorToString,
+        DataType inputType,
+        List<Map.Entry<String, Supplier<Object>>> valueSuppliers,
+        DataType expectedOutputType,
+        Function<Number, Object> expected
+    ) {
+        for (Map.Entry<String, Supplier<Object>> supplier : valueSuppliers) {
+            suppliers.add(new TestCaseSupplier(supplier.getKey(), List.of(inputType), () -> {
+                Number value = (Number) supplier.getValue().get();
+                TypedData typed = new TypedData(
+                    // TODO there has to be a better way to handle unsigned long
+                    value instanceof BigInteger b ? NumericUtils.asLongUnsigned(b) : value,
+                    inputType,
+                    "value"
+                );
+                return new TestCase(List.of(typed), expectedEvaluatorToString, expectedOutputType, equalTo(expected.apply(value)));
+            }));
+        }
+    }
+
+    private static List<Map.Entry<String, Supplier<Object>>> intCases(int min, int max) {
+        List<Map.Entry<String, Supplier<Object>>> cases = new ArrayList<>();
+        if (0 <= max && 0 >= min) {
+            cases.add(Map.entry("<0 int>", () -> 0));
+        }
+
+        int lower = Math.max(min, 1);
+        int upper = Math.min(max, Integer.MAX_VALUE);
+        if (lower < upper) {
+            cases.add(Map.entry("<positive int>", () -> ESTestCase.randomIntBetween(lower, upper)));
+        } else if (lower == upper) {
+            cases.add(Map.entry("<" + lower + " int>", () -> lower));
+        }
+
+        int lower1 = Math.max(min, Integer.MIN_VALUE);
+        int upper1 = Math.min(max, -1);
+        if (lower1 < upper1) {
+            cases.add(Map.entry("<negative int>", () -> ESTestCase.randomIntBetween(lower1, upper1)));
+        } else if (lower1 == upper1) {
+            cases.add(Map.entry("<" + lower1 + " int>", () -> lower1));
+        }
+        return cases;
+    }
+
+    private static List<Map.Entry<String, Supplier<Object>>> longCases(long min, long max) {
+        List<Map.Entry<String, Supplier<Object>>> cases = new ArrayList<>();
+        if (0L <= max && 0L >= min) {
+            cases.add(Map.entry("<0 long>", () -> 0L));
+        }
+
+        long lower = Math.max(min, 1);
+        long upper = Math.min(max, Long.MAX_VALUE);
+        if (lower < upper) {
+            cases.add(Map.entry("<positive long>", () -> ESTestCase.randomLongBetween(lower, upper)));
+        } else if (lower == upper) {
+            cases.add(Map.entry("<" + lower + " long>", () -> lower));
+        }
+
+        long lower1 = Math.max(min, Long.MIN_VALUE);
+        long upper1 = Math.min(max, -1);
+        if (lower1 < upper1) {
+            cases.add(Map.entry("<negative long>", () -> ESTestCase.randomLongBetween(lower1, upper1)));
+        } else if (lower1 == upper1) {
+            cases.add(Map.entry("<" + lower1 + " long>", () -> lower1));
+        }
+
+        return cases;
+    }
+
+    private static List<Map.Entry<String, Supplier<Object>>> ulongCases(BigInteger min, BigInteger max) {
+        List<Map.Entry<String, Supplier<Object>>> cases = new ArrayList<>();
+
+        // Zero
+        if (BigInteger.ZERO.compareTo(max) <= 0 && BigInteger.ZERO.compareTo(min) >= 0) {
+            cases.add(Map.entry("<0 unsigned long>", () -> BigInteger.ZERO));
+        }
+
+        // small values, less than Long.MAX_VALUE
+        BigInteger lower1 = min.max(BigInteger.ONE);
+        BigInteger upper1 = max.min(BigInteger.valueOf(Integer.MAX_VALUE));
+        if (lower1.compareTo(upper1) < 0) {
+            cases.add(
+                Map.entry(
+                    "<small unsigned long>",
+                    () -> BigInteger.valueOf(ESTestCase.randomLongBetween(lower1.longValue(), upper1.longValue()))
+                )
+            );
+        } else if (lower1.compareTo(upper1) == 0) {
+            cases.add(Map.entry("<small unsigned long>", () -> lower1));
+        }
+
+        // Big values, greater than Long.MAX_VALUE
+        BigInteger lower2 = min.max(BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.ONE));
+        BigInteger upper2 = max.min(BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.valueOf(Integer.MAX_VALUE)));
+        if (lower2.compareTo(upper2) < 0) {
+            cases.add(
+                Map.entry(
+                    "<big unsigned long>",
+                    () -> BigInteger.valueOf(ESTestCase.randomLongBetween(lower2.longValue(), upper2.longValue()))
+                )
+            );
+        } else if (lower2.compareTo(upper2) == 0) {
+            cases.add(Map.entry("<big unsigned long>", () -> lower2));
+        }
+        return cases;
+    }
+
+    private static List<Map.Entry<String, Supplier<Object>>> doubleCases(double min, double max) {
+        List<Map.Entry<String, Supplier<Object>>> cases = new ArrayList<>();
+
+        // Zeros
+        if (0d <= max && 0d >= min) {
+            cases.add(Map.entry("<0 double>", () -> 0.0d));
+            cases.add(Map.entry("<-0 double>", () -> -0.0d));
+        }
+
+        // Positive small double
+        double lower1 = Math.max(0d, min);
+        double upper1 = Math.min(1d, max);
+        if (lower1 < upper1) {
+            cases.add(Map.entry("<small positive double>", () -> ESTestCase.randomDoubleBetween(lower1, upper1, true)));
+        } else if (lower1 == upper1) {
+            cases.add(Map.entry("<small positive double>", () -> lower1));
+        }
+
+        // Negative small double
+        double lower2 = Math.max(-1d, min);
+        double upper2 = Math.min(0d, max);
+        if (lower2 < upper2) {
+            cases.add(Map.entry("<small negative double>", () -> ESTestCase.randomDoubleBetween(lower2, upper2, true)));
+        } else if (lower2 == upper2) {
+            cases.add(Map.entry("<small negative double>", () -> lower2));
+        }
+
+        // Positive big double
+        double lower3 = Math.max(1d, min); // start at 1 (inclusive) because the density of values between 0 and 1 is very high
+        double upper3 = Math.min(Double.MAX_VALUE, max);
+        if (lower3 < upper3) {
+            cases.add(Map.entry("<big positive double>", () -> ESTestCase.randomDoubleBetween(lower3, upper3, true)));
+        } else if (lower3 == upper3) {
+            cases.add(Map.entry("<big positive double>", () -> lower3));
+        }
+
+        // Negative big double
+        // note: Double.MIN_VALUE is the smallest non-zero positive double, not the smallest non-infinite negative double.
+        double lower4 = Math.max(-Double.MAX_VALUE, min);
+        double upper4 = Math.min(-1, max); // because again, the interval from -1 to 0 is very high density
+        if (lower4 < upper4) {
+            cases.add(Map.entry("<big negative double>", () -> ESTestCase.randomDoubleBetween(lower4, upper4, true)));
+        } else if (lower4 == upper4) {
+            cases.add(Map.entry("<big negative double>", () -> lower4));
+        }
+        return cases;
+    }
+
+    private static final Map<DataType, List<Map.Entry<String, Supplier<Object>>>> RANDOM_VALUE_SUPPLIERS = Map.ofEntries(
+        Map.entry(
+            DataTypes.DOUBLE,
+            List.of(
+                Map.entry("<0 double>", () -> 0.0d),
+                Map.entry("<small positive double>", () -> ESTestCase.randomDouble()),
+                Map.entry("<small negative double>", () -> -ESTestCase.randomDouble()),
+                Map.entry("<big positive double>", () -> ESTestCase.randomDoubleBetween(0, Double.MAX_VALUE, false)),
+                Map.entry("<big negative double>", () -> ESTestCase.randomDoubleBetween(Double.MIN_VALUE, 0 - Double.MIN_NORMAL, true))
+            )
+        ),
+        Map.entry(
+            DataTypes.LONG,
+            List.of(
+                Map.entry("<0 long>", () -> 0L),
+                Map.entry("<positive long>", () -> ESTestCase.randomLongBetween(1, Long.MAX_VALUE)),
+                Map.entry("<negative long>", () -> ESTestCase.randomLongBetween(Long.MIN_VALUE, -1))
+            )
+        ),
+        Map.entry(
+            DataTypes.INTEGER,
+            List.of(
+                Map.entry("<0 int>", () -> 0),
+                Map.entry("<positive int>", () -> ESTestCase.between(1, Integer.MAX_VALUE)),
+                Map.entry("<negative int>", () -> ESTestCase.between(Integer.MIN_VALUE, -1))
+            )
+        ),
+        Map.entry(
+            DataTypes.UNSIGNED_LONG,
+            List.of(
+                Map.entry("<0 unsigned long>", () -> BigInteger.ZERO),
+                Map.entry("<small unsigned long>", () -> BigInteger.valueOf(ESTestCase.randomLongBetween(1, Integer.MAX_VALUE))),
+                Map.entry(
+                    "<big unsigned long>",
+                    () -> BigInteger.valueOf(Long.MAX_VALUE).add(BigInteger.valueOf(ESTestCase.randomLongBetween(1, Integer.MAX_VALUE)))
+                )
+            )
+        )
+    );
+
+    private static String castToDoubleEvaluator(String original, DataType current) {
+        if (current == DataTypes.DOUBLE) {
+            return original;
+        }
+        if (current == DataTypes.INTEGER) {
+            return "CastIntToDoubleEvaluator[v=" + original + "]";
+        }
+        if (current == DataTypes.LONG) {
+            return "CastLongToDoubleEvaluator[v=" + original + "]";
+        }
+        if (current == DataTypes.UNSIGNED_LONG) {
+            return "CastUnsignedLongToDoubleEvaluator[v=" + original + "]";
+        }
+        throw new UnsupportedOperationException();
+    }
+
+    public static class TestCase {
+        /**
+         * The {@link Source} this test case should be run with
+         */
+        private Source source;
+        /**
+         * The parameter values and types to pass into the function for this test run
+         */
+        private List<TypedData> data;
+
+        /**
+         * The expected toString output for the evaluator this function invocation should generate
+         */
+        String evaluatorToString;
+        /**
+         * The expected output type for the case being tested
+         */
+        DataType expectedType;
+        /**
+         * A matcher to validate the output of the function run on the given input data
+         */
+        private Matcher<Object> matcher;
+
+        /**
+         * Warnings this test is expected to produce
+         */
+        private String[] expectedWarnings;
+
+        private final String expectedTypeError;
+        private final boolean allTypesAreRepresentable;
+
+        public TestCase(List<TypedData> data, String evaluatorToString, DataType expectedType, Matcher<Object> matcher) {
+            this(data, evaluatorToString, expectedType, matcher, null, null);
+        }
+
+        public static TestCase typeError(List<TypedData> data, String expectedTypeError) {
+            return new TestCase(data, null, null, null, null, expectedTypeError);
+        }
+
+        TestCase(
+            List<TypedData> data,
+            String evaluatorToString,
+            DataType expectedType,
+            Matcher<Object> matcher,
+            String[] expectedWarnings,
+            String expectedTypeError
+        ) {
+            this.source = Source.EMPTY;
+            this.data = data;
+            this.evaluatorToString = evaluatorToString;
+            this.expectedType = expectedType;
+            this.matcher = matcher;
+            this.expectedWarnings = expectedWarnings;
+            this.expectedTypeError = expectedTypeError;
+            this.allTypesAreRepresentable = data.stream().allMatch(d -> EsqlDataTypes.isRepresentable(d.type));
+        }
+
+        public Source getSource() {
+            return source;
+        }
+
+        public List<TypedData> getData() {
+            return data;
+        }
+
+        public List<Expression> getDataAsFields() {
+            return data.stream().map(t -> AbstractFunctionTestCase.field(t.name(), t.type())).collect(Collectors.toList());
+        }
+
+        public List<Expression> getDataAsLiterals() {
+            return data.stream().map(t -> new Literal(Source.synthetic(t.name()), t.data(), t.type())).collect(Collectors.toList());
+        }
+
+        public List<Object> getDataValues() {
+            return data.stream().map(t -> t.data()).collect(Collectors.toList());
+        }
+
+        public boolean allTypesAreRepresentable() {
+            return allTypesAreRepresentable;
+        }
+
+        public Matcher<Object> getMatcher() {
+            return matcher;
+        }
+
+        public String[] getExpectedWarnings() {
+            return expectedWarnings;
+        }
+
+        public String getExpectedTypeError() {
+            return expectedTypeError;
+        }
+
+        public TestCase withWarning(String warning) {
+            String[] newWarnings;
+            if (expectedWarnings != null) {
+                newWarnings = Arrays.copyOf(expectedWarnings, expectedWarnings.length + 1);
+                newWarnings[expectedWarnings.length] = warning;
+            } else {
+                newWarnings = new String[] { warning };
+            }
+            return new TestCase(data, evaluatorToString, expectedType, matcher, newWarnings, expectedTypeError);
+        }
+    }
+
+    /**
+     * Holds a data value and the intended parse type of that value
+     * @param data - value to test against
+     * @param type - type of the value, for building expressions
+     */
+    public record TypedData(Object data, DataType type, String name) {
+        public TypedData(Object data, String name) {
+            this(data, EsqlDataTypes.fromJava(data), name);
+        }
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractScalarFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractScalarFunctionTestCase.java
index 6fafb684d2742..ae46592a90ac1 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractScalarFunctionTestCase.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractScalarFunctionTestCase.java
@@ -25,6 +25,7 @@
 import java.util.List;
 import java.util.Locale;
 import java.util.Set;
+import java.util.stream.Stream;
 
 import static org.hamcrest.Matchers.equalTo;
 
@@ -67,6 +68,14 @@ private Set<DataType> withNullAndSorted(DataType[] validTypes) {
         return realValidTypes;
     }
 
+    public Set<DataType> sortedTypesSet(DataType[] validTypes, DataType...
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractScalarFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractScalarFunctionTestCase.java
index 6fafb684d2742..ae46592a90ac1 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractScalarFunctionTestCase.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/AbstractScalarFunctionTestCase.java
@@ -25,6 +25,7 @@
 import java.util.List;
 import java.util.Locale;
 import java.util.Set;
+import java.util.stream.Stream;
 
 import static org.hamcrest.Matchers.equalTo;
 
@@ -67,6 +68,14 @@ private Set<DataType> withNullAndSorted(DataType[] validTypes) {
         return realValidTypes;
     }
 
+    public Set<DataType> sortedTypesSet(DataType[] validTypes, DataType... additionalTypes) {
+        Set<DataType> mergedSet = new LinkedHashSet<>();
+        Stream.concat(Stream.of(validTypes), Stream.of(additionalTypes))
+            .sorted(Comparator.comparing(DataType::name))
+            .forEach(mergedSet::add);
+        return mergedSet;
+    }
+
     /**
      * All string types (keyword, text, match_only_text, etc). For passing to {@link #required} or {@link #optional}.
      */
@@ -179,6 +188,12 @@ private String expectedTypeName(Set<DataType> validTypes) {
         if (withoutNull.equals(List.of(DataTypes.DATETIME))) {
             return "datetime";
         }
+        List<DataType> negations = Stream.concat(Stream.of(numerics()), Stream.of(EsqlDataTypes.DATE_PERIOD, EsqlDataTypes.TIME_DURATION))
+            .sorted(Comparator.comparing(DataType::name))
+            .toList();
+        if (withoutNull.equals(negations)) {
+            return "numeric, date_period or time_duration";
+        }
         if (validTypes.equals(Set.copyOf(Arrays.asList(representable())))) {
             return "representable";
         }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/VaragsTestCaseBuilder.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/VaragsTestCaseBuilder.java
index e984a39eb69da..c112917158726 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/VaragsTestCaseBuilder.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/VaragsTestCaseBuilder.java
@@ -9,7 +9,7 @@
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.ql.type.DataType;
 import org.elasticsearch.xpack.ql.type.DataTypes;
 import org.hamcrest.Matcher;
@@ -179,8 +179,8 @@ public VaragsTestCaseBuilder expectFlattenedBoolean(Function<Stream<Boolean>, Optional<Boolean>> expectedBoolean) {
         return this;
     }
 
-    public List<AbstractFunctionTestCase.TestCaseSupplier> suppliers() {
-        List<AbstractFunctionTestCase.TestCaseSupplier> suppliers = new ArrayList<>();
+    public List<TestCaseSupplier> suppliers() {
+        List<TestCaseSupplier> suppliers = new ArrayList<>();
         // TODO more types
         if (expectedStr != null) {
             strings(suppliers);
@@ -197,19 +197,19 @@ public List<TestCaseSupplier> suppliers() {
         return suppliers;
     }
 
-    private void strings(List<AbstractFunctionTestCase.TestCaseSupplier> suppliers) {
+    private void strings(List<TestCaseSupplier> suppliers) {
         for (int count = 1; count < MAX_WIDTH; count++) {
             for (boolean multivalued : new boolean[] { false, true }) {
                 int paramCount = count;
                 suppliers.add(
-                    new AbstractFunctionTestCase.TestCaseSupplier(
+                    new TestCaseSupplier(
                         testCaseName(paramCount, multivalued, DataTypes.KEYWORD),
                         dataTypes(paramCount, DataTypes.KEYWORD),
                         () -> stringCase(DataTypes.KEYWORD, paramCount, multivalued)
                     )
                 );
                 suppliers.add(
-                    new AbstractFunctionTestCase.TestCaseSupplier(
+                    new TestCaseSupplier(
                         testCaseName(paramCount, multivalued, DataTypes.TEXT),
                         dataTypes(paramCount, DataTypes.TEXT),
                         () -> stringCase(DataTypes.TEXT, paramCount, multivalued)
@@ -219,29 +219,27 @@ private void strings(List<TestCaseSupplier> suppliers) {
         }
     }
 
-    private AbstractFunctionTestCase.TestCase stringCase(DataType dataType, int paramCount, boolean multivalued) {
+    private TestCaseSupplier.TestCase stringCase(DataType dataType, int paramCount, boolean multivalued) {
         String[][] data = new String[paramCount][];
-        List<AbstractFunctionTestCase.TypedData> typedData = new ArrayList<>(paramCount);
+        List<TestCaseSupplier.TypedData> typedData = new ArrayList<>(paramCount);
         for (int p = 0; p < paramCount; p++) {
             if (multivalued) {
                 data[p] = ESTestCase.randomList(1, 4, () -> ESTestCase.randomAlphaOfLength(5)).toArray(String[]::new);
-                typedData.add(
-                    new AbstractFunctionTestCase.TypedData(Arrays.stream(data[p]).map(BytesRef::new).toList(),
dataType, "field" + p) - ); + typedData.add(new TestCaseSupplier.TypedData(Arrays.stream(data[p]).map(BytesRef::new).toList(), dataType, "field" + p)); } else { data[p] = new String[] { ESTestCase.randomAlphaOfLength(5) }; - typedData.add(new AbstractFunctionTestCase.TypedData(new BytesRef(data[p][0]), dataType, "field" + p)); + typedData.add(new TestCaseSupplier.TypedData(new BytesRef(data[p][0]), dataType, "field" + p)); } } return testCase(typedData, expectedEvaluatorPrefix.apply("BytesRef"), dataType, expectedStr.apply(data)); } - private void longs(List suppliers) { + private void longs(List suppliers) { for (int count = 1; count < MAX_WIDTH; count++) { for (boolean multivalued : new boolean[] { false, true }) { int paramCount = count; suppliers.add( - new AbstractFunctionTestCase.TestCaseSupplier( + new TestCaseSupplier( testCaseName(paramCount, multivalued, DataTypes.LONG), dataTypes(paramCount, DataTypes.LONG), () -> longCase(paramCount, multivalued) @@ -251,34 +249,30 @@ private void longs(List suppliers) { } } - private AbstractFunctionTestCase.TestCase longCase(int paramCount, boolean multivalued) { + private TestCaseSupplier.TestCase longCase(int paramCount, boolean multivalued) { long[][] data = new long[paramCount][]; - List typedData = new ArrayList<>(paramCount); + List typedData = new ArrayList<>(paramCount); for (int p = 0; p < paramCount; p++) { if (multivalued) { List d = ESTestCase.randomList(1, 4, () -> ESTestCase.randomLong()); data[p] = d.stream().mapToLong(Long::longValue).toArray(); typedData.add( - new AbstractFunctionTestCase.TypedData( - Arrays.stream(data[p]).mapToObj(Long::valueOf).toList(), - DataTypes.LONG, - "field" + p - ) + new TestCaseSupplier.TypedData(Arrays.stream(data[p]).mapToObj(Long::valueOf).toList(), DataTypes.LONG, "field" + p) ); } else { data[p] = new long[] { ESTestCase.randomLong() }; - typedData.add(new AbstractFunctionTestCase.TypedData(data[p][0], DataTypes.LONG, "field" + p)); + typedData.add(new TestCaseSupplier.TypedData(data[p][0], DataTypes.LONG, "field" + p)); } } return testCase(typedData, expectedEvaluatorPrefix.apply("Long"), DataTypes.LONG, expectedLong.apply(data)); } - private void ints(List suppliers) { + private void ints(List suppliers) { for (int count = 1; count < MAX_WIDTH; count++) { for (boolean multivalued : new boolean[] { false, true }) { int paramCount = count; suppliers.add( - new AbstractFunctionTestCase.TestCaseSupplier( + new TestCaseSupplier( testCaseName(paramCount, multivalued, DataTypes.INTEGER), dataTypes(paramCount, DataTypes.INTEGER), () -> intCase(paramCount, multivalued) @@ -288,28 +282,28 @@ private void ints(List suppliers) { } } - private AbstractFunctionTestCase.TestCase intCase(int paramCount, boolean multivalued) { + private TestCaseSupplier.TestCase intCase(int paramCount, boolean multivalued) { int[][] data = new int[paramCount][]; - List typedData = new ArrayList<>(paramCount); + List typedData = new ArrayList<>(paramCount); for (int p = 0; p < paramCount; p++) { if (multivalued) { List d = ESTestCase.randomList(1, 4, () -> ESTestCase.randomInt()); data[p] = d.stream().mapToInt(Integer::intValue).toArray(); - typedData.add(new AbstractFunctionTestCase.TypedData(d, DataTypes.INTEGER, "field" + p)); + typedData.add(new TestCaseSupplier.TypedData(d, DataTypes.INTEGER, "field" + p)); } else { data[p] = new int[] { ESTestCase.randomInt() }; - typedData.add(new AbstractFunctionTestCase.TypedData(data[p][0], DataTypes.INTEGER, "field" + p)); + typedData.add(new 
TestCaseSupplier.TypedData(data[p][0], DataTypes.INTEGER, "field" + p)); } } return testCase(typedData, expectedEvaluatorPrefix.apply("Int"), DataTypes.INTEGER, expectedInt.apply(data)); } - private void booleans(List suppliers) { + private void booleans(List suppliers) { for (int count = 1; count < MAX_WIDTH; count++) { for (boolean multivalued : new boolean[] { false, true }) { int paramCount = count; suppliers.add( - new AbstractFunctionTestCase.TestCaseSupplier( + new TestCaseSupplier( testCaseName(paramCount, multivalued, DataTypes.BOOLEAN), dataTypes(paramCount, DataTypes.BOOLEAN), () -> booleanCase(paramCount, multivalued) @@ -319,9 +313,9 @@ private void booleans(List suppliers) } } - private AbstractFunctionTestCase.TestCase booleanCase(int paramCount, boolean multivalued) { + private TestCaseSupplier.TestCase booleanCase(int paramCount, boolean multivalued) { boolean[][] data = new boolean[paramCount][]; - List typedData = new ArrayList<>(paramCount); + List typedData = new ArrayList<>(paramCount); for (int p = 0; p < paramCount; p++) { if (multivalued) { int size = ESTestCase.between(1, 5); @@ -331,10 +325,10 @@ private AbstractFunctionTestCase.TestCase booleanCase(int paramCount, boolean mu data[p][i] = ESTestCase.randomBoolean(); paramData.add(data[p][i]); } - typedData.add(new AbstractFunctionTestCase.TypedData(paramData, DataTypes.BOOLEAN, "field" + p)); + typedData.add(new TestCaseSupplier.TypedData(paramData, DataTypes.BOOLEAN, "field" + p)); } else { data[p] = new boolean[] { ESTestCase.randomBoolean() }; - typedData.add(new AbstractFunctionTestCase.TypedData(data[p][0], DataTypes.BOOLEAN, "field" + p)); + typedData.add(new TestCaseSupplier.TypedData(data[p][0], DataTypes.BOOLEAN, "field" + p)); } } return testCase(typedData, expectedEvaluatorPrefix.apply("Boolean"), DataTypes.BOOLEAN, expectedBoolean.apply(data)); @@ -348,13 +342,13 @@ private String testCaseName(int count, boolean multivalued, DataType type) { + ")"; } - protected AbstractFunctionTestCase.TestCase testCase( - List typedData, + protected TestCaseSupplier.TestCase testCase( + List typedData, String expectedEvaluatorPrefix, DataType expectedType, Matcher expectedValue ) { - return new AbstractFunctionTestCase.TestCase( + return new TestCaseSupplier.TestCase( typedData, expectedToString(expectedEvaluatorPrefix, typedData.size()), expectedType, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java index ece2d761c5708..db2e3fc482b8d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/CaseTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.compute.data.IntBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Expression.TypeResolution; @@ -33,7 +34,7 @@ public class CaseTests extends AbstractFunctionTestCase { - public CaseTests(@Name("TestCase") Supplier testCaseSupplier) { + public CaseTests(@Name("TestCase") Supplier 
testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -43,12 +44,12 @@ public CaseTests(@Name("TestCase") Supplier testCaseSupplier) { @ParametersFactory public static Iterable parameters() { return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("basics", () -> { - List typedData = List.of( - new TypedData(true, DataTypes.BOOLEAN, "cond"), - new TypedData(new BytesRef("a"), DataTypes.KEYWORD, "a"), - new TypedData(new BytesRef("b"), DataTypes.KEYWORD, "b") + List typedData = List.of( + new TestCaseSupplier.TypedData(true, DataTypes.BOOLEAN, "cond"), + new TestCaseSupplier.TypedData(new BytesRef("a"), DataTypes.KEYWORD, "a"), + new TestCaseSupplier.TypedData(new BytesRef("b"), DataTypes.KEYWORD, "b") ); - return new TestCase( + return new TestCaseSupplier.TestCase( typedData, "CaseEvaluator[resultType=BYTES_REF, conditions=[ConditionEvaluator[condition=Attribute[channel=0], " + "value=Attribute[channel=1]]], elseVal=Attribute[channel=2]]", diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestTests.java index 7f210e25205e1..22f0207525b24 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/GreatestTests.java @@ -12,6 +12,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.VaragsTestCaseBuilder; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; @@ -26,7 +27,7 @@ import static org.hamcrest.Matchers.equalTo; public class GreatestTests extends AbstractFunctionTestCase { - public GreatestTests(@Name("TestCase") Supplier testCaseSupplier) { + public GreatestTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -43,10 +44,10 @@ public static Iterable parameters() { new TestCaseSupplier( "(a, b)", List.of(DataTypes.KEYWORD, DataTypes.KEYWORD), - () -> new TestCase( + () -> new TestCaseSupplier.TestCase( List.of( - new TypedData(new BytesRef("a"), DataTypes.KEYWORD, "a"), - new TypedData(new BytesRef("b"), DataTypes.KEYWORD, "b") + new TestCaseSupplier.TypedData(new BytesRef("a"), DataTypes.KEYWORD, "a"), + new TestCaseSupplier.TypedData(new BytesRef("b"), DataTypes.KEYWORD, "b") ), "GreatestBytesRefEvaluator[values=[MvMax[field=Attribute[channel=0]], MvMax[field=Attribute[channel=1]]]]", DataTypes.KEYWORD, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java index 8ed2e6bfc4824..25deaaa9f45aa 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/conditional/LeastTests.java @@ -12,6 +12,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import 
org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.VaragsTestCaseBuilder; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; @@ -25,7 +26,7 @@ import static org.hamcrest.Matchers.equalTo; public class LeastTests extends AbstractFunctionTestCase { - public LeastTests(@Name("TestCase") Supplier testCaseSupplier) { + public LeastTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -42,10 +43,10 @@ public static Iterable parameters() { new TestCaseSupplier( "(a, b)", List.of(DataTypes.KEYWORD, DataTypes.KEYWORD), - () -> new TestCase( + () -> new TestCaseSupplier.TestCase( List.of( - new TypedData(new BytesRef("a"), DataTypes.KEYWORD, "a"), - new TypedData(new BytesRef("b"), DataTypes.KEYWORD, "b") + new TestCaseSupplier.TypedData(new BytesRef("a"), DataTypes.KEYWORD, "a"), + new TestCaseSupplier.TypedData(new BytesRef("b"), DataTypes.KEYWORD, "b") ), "LeastBytesRefEvaluator[values=[MvMin[field=Attribute[channel=0]], MvMin[field=Attribute[channel=1]]]]", DataTypes.KEYWORD, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java index f56af90bafe84..a87e7c5eb5bb1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateExtractTests.java @@ -12,6 +12,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.xpack.esql.EsqlTestUtils; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Literal; @@ -29,17 +30,17 @@ import static org.hamcrest.Matchers.is; public class DateExtractTests extends AbstractScalarFunctionTestCase { - public DateExtractTests(@Name("TestCase") Supplier testCaseSupplier) { + public DateExtractTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @ParametersFactory public static Iterable parameters() { return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Date Extract Year", () -> { - return new TestCase( + return new TestCaseSupplier.TestCase( List.of( - new TypedData(1687944333000L, DataTypes.DATETIME, "date"), - new TypedData(new BytesRef("YEAR"), DataTypes.KEYWORD, "field") + new TestCaseSupplier.TypedData(1687944333000L, DataTypes.DATETIME, "date"), + new TestCaseSupplier.TypedData(new BytesRef("YEAR"), DataTypes.KEYWORD, "field") ), "DateExtractEvaluator[value=Attribute[channel=0], chronoField=Attribute[channel=1], zone=Z]", DataTypes.LONG, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java index d2ecc980596ed..48e70e929f8e1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/date/DateParseTests.java 
@@ -11,6 +11,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; @@ -23,17 +24,17 @@ import static org.hamcrest.Matchers.equalTo; public class DateParseTests extends AbstractScalarFunctionTestCase { - public DateParseTests(@Name("TestCase") Supplier testCaseSupplier) { + public DateParseTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @ParametersFactory public static Iterable parameters() { return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Basic Case", () -> { - return new TestCase( + return new TestCaseSupplier.TestCase( List.of( - new TypedData(new BytesRef("2023-05-05"), DataTypes.KEYWORD, "first"), - new TypedData(new BytesRef("yyyy-MM-dd"), DataTypes.KEYWORD, "second") + new TestCaseSupplier.TypedData(new BytesRef("2023-05-05"), DataTypes.KEYWORD, "first"), + new TestCaseSupplier.TypedData(new BytesRef("yyyy-MM-dd"), DataTypes.KEYWORD, "second") ), "DateParseEvaluator[val=Attribute[channel=0], formatter=Attribute[channel=1], zoneId=Z]", DataTypes.DATETIME, diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java index 5dc96ba97a56c..e6621fdf78408 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AbsTests.java @@ -10,6 +10,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; @@ -28,8 +29,8 @@ public static Iterable parameters() { List suppliers = new ArrayList<>(); suppliers.add(new TestCaseSupplier(List.of(DataTypes.INTEGER), () -> { int arg = randomInt(); - return new TestCase( - List.of(new TypedData(arg, DataTypes.INTEGER, "arg")), + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(arg, DataTypes.INTEGER, "arg")), "AbsIntEvaluator[fieldVal=Attribute[channel=0]]", DataTypes.INTEGER, equalTo(Math.abs(arg)) @@ -37,8 +38,8 @@ public static Iterable parameters() { })); suppliers.add(new TestCaseSupplier(List.of(DataTypes.UNSIGNED_LONG), () -> { long arg = randomLong(); - return new TestCase( - List.of(new TypedData(arg, DataTypes.UNSIGNED_LONG, "arg")), + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(arg, DataTypes.UNSIGNED_LONG, "arg")), "Attribute[channel=0]", DataTypes.UNSIGNED_LONG, equalTo(arg) @@ -46,8 +47,8 @@ public static Iterable parameters() { })); suppliers.add(new TestCaseSupplier(List.of(DataTypes.LONG), () -> { long arg = randomLong(); - return new TestCase( - List.of(new TypedData(arg, DataTypes.LONG, "arg")), + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(arg, DataTypes.LONG, "arg")), 
"AbsLongEvaluator[fieldVal=Attribute[channel=0]]", DataTypes.LONG, equalTo(Math.abs(arg)) @@ -55,8 +56,8 @@ public static Iterable parameters() { })); suppliers.add(new TestCaseSupplier(List.of(DataTypes.DOUBLE), () -> { double arg = randomDouble(); - return new TestCase( - List.of(new TypedData(arg, DataTypes.DOUBLE, "arg")), + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(arg, DataTypes.DOUBLE, "arg")), "AbsDoubleEvaluator[fieldVal=Attribute[channel=0]]", DataTypes.DOUBLE, equalTo(Math.abs(arg)) @@ -65,7 +66,7 @@ public static Iterable parameters() { return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); } - public AbsTests(@Name("TestCase") Supplier testCaseSupplier) { + public AbsTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AcosTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AcosTests.java index 492036a73c9d6..0958fb67ee805 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AcosTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AcosTests.java @@ -11,6 +11,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; @@ -18,7 +19,7 @@ import java.util.function.Supplier; public class AcosTests extends AbstractFunctionTestCase { - public AcosTests(@Name("TestCase") Supplier testCaseSupplier) { + public AcosTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AsinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AsinTests.java index aebfd7a19ab54..a3f1fcee7a092 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AsinTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AsinTests.java @@ -11,6 +11,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; @@ -18,7 +19,7 @@ import java.util.function.Supplier; public class AsinTests extends AbstractFunctionTestCase { - public AsinTests(@Name("TestCase") Supplier testCaseSupplier) { + public AsinTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Tests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Tests.java index ccfb115a230f4..3f4de813679da 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Tests.java +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Atan2Tests.java @@ -11,6 +11,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; @@ -18,7 +19,7 @@ import java.util.function.Supplier; public class Atan2Tests extends AbstractFunctionTestCase { - public Atan2Tests(@Name("TestCase") Supplier testCaseSupplier) { + public Atan2Tests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanTests.java index 657cb84445b88..f529b07071720 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AtanTests.java @@ -11,6 +11,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; @@ -18,7 +19,7 @@ import java.util.function.Supplier; public class AtanTests extends AbstractFunctionTestCase { - public AtanTests(@Name("TestCase") Supplier testCaseSupplier) { + public AtanTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AutoBucketTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AutoBucketTests.java index d2f5e8f7fef5d..79089864daa4a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AutoBucketTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/AutoBucketTests.java @@ -13,6 +13,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.common.Rounding; import org.elasticsearch.index.mapper.DateFieldMapper; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Literal; @@ -27,17 +28,21 @@ import static org.hamcrest.Matchers.equalTo; public class AutoBucketTests extends AbstractScalarFunctionTestCase { - public AutoBucketTests(@Name("TestCase") Supplier testCaseSupplier) { + public AutoBucketTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @ParametersFactory public static Iterable parameters() { return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Autobucket Single date", () -> { - List args = List.of( - new TypedData(DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-02-17T09:00:00.00Z"), DataTypes.DATETIME, "arg") + List args = List.of( + new TestCaseSupplier.TypedData( + 
DateFieldMapper.DEFAULT_DATE_TIME_FORMATTER.parseMillis("2023-02-17T09:00:00.00Z"), + DataTypes.DATETIME, + "arg" + ) ); - return new TestCase( + return new TestCaseSupplier.TestCase( args, "DateTruncEvaluator[fieldVal=Attribute[channel=0], rounding=Rounding[DAY_OF_MONTH in Z][fixed to midnight]]", DataTypes.DATETIME, @@ -67,7 +72,7 @@ protected DataType expectedType(List argTypes) { return argTypes.get(0); } - private static Matcher resultsMatcher(List typedData) { + private static Matcher resultsMatcher(List typedData) { long millis = ((Number) typedData.get(0).data()).longValue(); return equalTo(Rounding.builder(Rounding.DateTimeUnit.DAY_OF_MONTH).build().prepareForUnknown().round(millis)); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java index 69c2a2817c6bc..f4e03de146c54 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CeilTests.java @@ -10,6 +10,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; @@ -22,7 +23,7 @@ import static org.hamcrest.Matchers.equalTo; public class CeilTests extends AbstractScalarFunctionTestCase { - public CeilTests(@Name("TestCase") Supplier testCaseSupplier) { + public CeilTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -30,27 +31,32 @@ public CeilTests(@Name("TestCase") Supplier testCaseSupplier) { public static Iterable parameters() { return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("large double value", () -> { double arg = 1 / randomDouble(); - return new TestCase( - List.of(new TypedData(arg, DataTypes.DOUBLE, "arg")), + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(arg, DataTypes.DOUBLE, "arg")), "CeilDoubleEvaluator[val=Attribute[channel=0]]", DataTypes.DOUBLE, equalTo(Math.ceil(arg)) ); }), new TestCaseSupplier("integer value", () -> { int arg = randomInt(); - return new TestCase( - List.of(new TypedData(arg, DataTypes.INTEGER, "arg")), + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(arg, DataTypes.INTEGER, "arg")), "Attribute[channel=0]", DataTypes.INTEGER, equalTo(arg) ); }), new TestCaseSupplier("long value", () -> { long arg = randomLong(); - return new TestCase(List.of(new TypedData(arg, DataTypes.LONG, "arg")), "Attribute[channel=0]", DataTypes.LONG, equalTo(arg)); + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(arg, DataTypes.LONG, "arg")), + "Attribute[channel=0]", + DataTypes.LONG, + equalTo(arg) + ); }), new TestCaseSupplier("unsigned long value", () -> { long arg = randomLong(); - return new TestCase( - List.of(new TypedData(arg, DataTypes.UNSIGNED_LONG, "arg")), + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(arg, DataTypes.UNSIGNED_LONG, "arg")), "Attribute[channel=0]", DataTypes.UNSIGNED_LONG, equalTo(arg) diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosTests.java index d1d668cf5de3f..70d3f7704a0dc 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CosTests.java @@ -11,6 +11,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; @@ -18,7 +19,7 @@ import java.util.function.Supplier; public class CosTests extends AbstractFunctionTestCase { - public CosTests(@Name("TestCase") Supplier testCaseSupplier) { + public CosTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java index 259d3edb54e40..4e6add3ffe0b4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/CoshTests.java @@ -11,6 +11,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; @@ -18,7 +19,7 @@ import java.util.function.Supplier; public class CoshTests extends AbstractFunctionTestCase { - public CoshTests(@Name("TestCase") Supplier testCaseSupplier) { + public CoshTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/ETests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/ETests.java index 8947ad78e0356..c66b752aeb6db 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/ETests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/ETests.java @@ -12,6 +12,7 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; @@ -24,15 +25,15 @@ import static org.hamcrest.Matchers.equalTo; public class ETests extends AbstractScalarFunctionTestCase { - public ETests(@Name("TestCase") Supplier testCaseSupplier) { + public ETests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @ParametersFactory public static Iterable parameters() { return parameterSuppliersFromTypedData(List.of(new 
TestCaseSupplier("E Test", () -> { - return new TestCase( - List.of(new TypedData(1, DataTypes.INTEGER, "foo")), + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(1, DataTypes.INTEGER, "foo")), "LiteralsEvaluator[block=2.718281828459045]", DataTypes.DOUBLE, equalTo(Math.E) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java index 530fcc177a0ac..9485aed23320a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/FloorTests.java @@ -11,17 +11,21 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; import org.elasticsearch.xpack.ql.util.NumericUtils; +import java.math.BigInteger; import java.util.ArrayList; import java.util.List; import java.util.function.Supplier; +import static org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier.MAX_UNSIGNED_LONG; + public class FloorTests extends AbstractFunctionTestCase { - public FloorTests(@Name("TestCase") Supplier testCaseSupplier) { + public FloorTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -29,10 +33,24 @@ public FloorTests(@Name("TestCase") Supplier testCaseSupplier) { public static Iterable parameters() { String read = "Attribute[channel=0]"; List suppliers = new ArrayList<>(); - TestCaseSupplier.forUnaryInt(suppliers, read, DataTypes.INTEGER, i -> i); - TestCaseSupplier.forUnaryLong(suppliers, read, DataTypes.LONG, l -> l); - TestCaseSupplier.forUnaryUnsignedLong(suppliers, read, DataTypes.UNSIGNED_LONG, ul -> NumericUtils.asLongUnsigned(ul)); - TestCaseSupplier.forUnaryDouble(suppliers, "FloorDoubleEvaluator[val=" + read + "]", DataTypes.DOUBLE, Math::floor); + TestCaseSupplier.forUnaryInt(suppliers, read, DataTypes.INTEGER, i -> i, Integer.MIN_VALUE, Integer.MAX_VALUE); + TestCaseSupplier.forUnaryLong(suppliers, read, DataTypes.LONG, l -> l, Long.MIN_VALUE, Long.MAX_VALUE); + TestCaseSupplier.forUnaryUnsignedLong( + suppliers, + read, + DataTypes.UNSIGNED_LONG, + ul -> NumericUtils.asLongUnsigned(ul), + BigInteger.ZERO, + MAX_UNSIGNED_LONG + ); + TestCaseSupplier.forUnaryDouble( + suppliers, + "FloorDoubleEvaluator[val=" + read + "]", + DataTypes.DOUBLE, + Math::floor, + Double.NEGATIVE_INFINITY, + Double.POSITIVE_INFINITY + ); return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(false, suppliers))); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsFiniteTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsFiniteTests.java index ea342a1b173f9..45903e34ba498 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsFiniteTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsFiniteTests.java @@ -10,6 +10,7 @@ import 
com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; @@ -21,7 +22,7 @@ import static org.hamcrest.Matchers.equalTo; public class IsFiniteTests extends AbstractRationalUnaryPredicateTests { - public IsFiniteTests(@Name("TestCase") Supplier testCaseSupplier) { + public IsFiniteTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -40,9 +41,9 @@ public static Iterable parameters() { ); } - private static TestCase makeTestCase(double val, boolean expected) { - return new TestCase( - List.of(new TypedData(val, DataTypes.DOUBLE, "arg")), + private static TestCaseSupplier.TestCase makeTestCase(double val, boolean expected) { + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(val, DataTypes.DOUBLE, "arg")), "IsFiniteEvaluator[val=Attribute[channel=0]]", DataTypes.BOOLEAN, equalTo(expected) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsInfiniteTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsInfiniteTests.java index b74d60a2a2e77..46f4bb742a840 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsInfiniteTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsInfiniteTests.java @@ -10,6 +10,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; @@ -21,7 +22,7 @@ import static org.hamcrest.Matchers.equalTo; public class IsInfiniteTests extends AbstractRationalUnaryPredicateTests { - public IsInfiniteTests(@Name("TestCase") Supplier testCaseSupplier) { + public IsInfiniteTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -40,9 +41,9 @@ public static Iterable parameters() { ); } - private static TestCase makeTestCase(double val, boolean expected) { - return new TestCase( - List.of(new TypedData(val, DataTypes.DOUBLE, "arg")), + private static TestCaseSupplier.TestCase makeTestCase(double val, boolean expected) { + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(val, DataTypes.DOUBLE, "arg")), "IsInfiniteEvaluator[val=Attribute[channel=0]]", DataTypes.BOOLEAN, equalTo(expected) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsNaNTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsNaNTests.java index 79120081815b0..ba22b503d5297 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsNaNTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/IsNaNTests.java @@ -10,6 +10,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import 
com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; @@ -21,7 +22,7 @@ import static org.hamcrest.Matchers.equalTo; public class IsNaNTests extends AbstractRationalUnaryPredicateTests { - public IsNaNTests(@Name("TestCase") Supplier testCaseSupplier) { + public IsNaNTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -41,9 +42,9 @@ public static Iterable parameters() { ); } - private static TestCase makeTestCase(double val, boolean expected) { - return new TestCase( - List.of(new TypedData(val, DataTypes.DOUBLE, "arg")), + private static TestCaseSupplier.TestCase makeTestCase(double val, boolean expected) { + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(val, DataTypes.DOUBLE, "arg")), "IsNaNEvaluator[val=Attribute[channel=0]]", DataTypes.BOOLEAN, equalTo(expected) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10Tests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10Tests.java index 5222fc605a6bd..fff62198f6e48 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10Tests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/Log10Tests.java @@ -11,16 +11,20 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; +import java.math.BigInteger; import java.util.ArrayList; import java.util.List; import java.util.function.Supplier; +import static org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier.MAX_UNSIGNED_LONG; + public class Log10Tests extends AbstractFunctionTestCase { - public Log10Tests(@Name("TestCase") Supplier testCaseSupplier) { + public Log10Tests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -28,15 +32,38 @@ public Log10Tests(@Name("TestCase") Supplier testCaseSupplier) { public static Iterable parameters() { String read = "Attribute[channel=0]"; List suppliers = new ArrayList<>(); - TestCaseSupplier.forUnaryInt(suppliers, "Log10IntEvaluator[val=" + read + "]", DataTypes.DOUBLE, Math::log10); - TestCaseSupplier.forUnaryLong(suppliers, "Log10LongEvaluator[val=" + read + "]", DataTypes.DOUBLE, Math::log10); + TestCaseSupplier.forUnaryInt( + suppliers, + "Log10IntEvaluator[val=" + read + "]", + DataTypes.DOUBLE, + Math::log10, + Integer.MIN_VALUE, + Integer.MAX_VALUE + ); + TestCaseSupplier.forUnaryLong( + suppliers, + "Log10LongEvaluator[val=" + read + "]", + DataTypes.DOUBLE, + Math::log10, + Long.MIN_VALUE, + Long.MAX_VALUE + ); TestCaseSupplier.forUnaryUnsignedLong( suppliers, "Log10UnsignedLongEvaluator[val=" + read + "]", DataTypes.DOUBLE, - ul -> Math.log10(ul.doubleValue()) + ul -> Math.log10(ul.doubleValue()), + BigInteger.ZERO, + MAX_UNSIGNED_LONG + ); + TestCaseSupplier.forUnaryDouble( + suppliers, + "Log10DoubleEvaluator[val=" + read + "]", + 
DataTypes.DOUBLE, + Math::log10, + Double.NEGATIVE_INFINITY, + Double.POSITIVE_INFINITY ); - TestCaseSupplier.forUnaryDouble(suppliers, "Log10DoubleEvaluator[val=" + read + "]", DataTypes.DOUBLE, Math::log10); return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers))); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PiTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PiTests.java index 12dc65da440f7..454e17933cfef 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PiTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PiTests.java @@ -12,6 +12,7 @@ import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.DoubleBlock; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; @@ -24,15 +25,15 @@ import static org.hamcrest.Matchers.equalTo; public class PiTests extends AbstractScalarFunctionTestCase { - public PiTests(@Name("TestCase") Supplier testCaseSupplier) { + public PiTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @ParametersFactory public static Iterable parameters() { return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Pi Test", () -> { - return new TestCase( - List.of(new TypedData(1, DataTypes.INTEGER, "foo")), + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(1, DataTypes.INTEGER, "foo")), "LiteralsEvaluator[block=3.141592653589793]", DataTypes.DOUBLE, equalTo(Math.PI) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowTests.java index db42f61241ffc..b890f786f3755 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/PowTests.java @@ -11,6 +11,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; @@ -23,7 +24,7 @@ import static org.hamcrest.Matchers.equalTo; public class PowTests extends AbstractScalarFunctionTestCase { - public PowTests(@Name("TestCase") Supplier testCaseSupplier) { + public PowTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -32,8 +33,11 @@ public static Iterable parameters() { return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("pow(, )", () -> { double base = 1 / randomDouble(); int exponent = between(-30, 30); - return new TestCase( - List.of(new TypedData(base, DataTypes.DOUBLE, "arg"), new TypedData(exponent, DataTypes.INTEGER, "exp")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(base, 
DataTypes.DOUBLE, "arg"), + new TestCaseSupplier.TypedData(exponent, DataTypes.INTEGER, "exp") + ), "PowDoubleEvaluator[base=Attribute[channel=0], exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", DataTypes.DOUBLE, equalTo(Math.pow(base, exponent)) @@ -41,8 +45,11 @@ public static Iterable parameters() { }), new TestCaseSupplier( "pow(NaN, 1)", - () -> new TestCase( - List.of(new TypedData(Double.NaN, DataTypes.DOUBLE, "base"), new TypedData(1.0d, DataTypes.DOUBLE, "exp")), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(Double.NaN, DataTypes.DOUBLE, "base"), + new TestCaseSupplier.TypedData(1.0d, DataTypes.DOUBLE, "exp") + ), "PowDoubleEvaluator[base=Attribute[channel=0], exponent=Attribute[channel=1]]", DataTypes.DOUBLE, equalTo(null) @@ -51,8 +58,11 @@ public static Iterable parameters() { ), new TestCaseSupplier( "pow(1, NaN)", - () -> new TestCase( - List.of(new TypedData(1.0d, DataTypes.DOUBLE, "base"), new TypedData(Double.NaN, DataTypes.DOUBLE, "exp")), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(1.0d, DataTypes.DOUBLE, "base"), + new TestCaseSupplier.TypedData(Double.NaN, DataTypes.DOUBLE, "exp") + ), "PowDoubleEvaluator[base=Attribute[channel=0], exponent=Attribute[channel=1]]", DataTypes.DOUBLE, equalTo(null) @@ -61,8 +71,11 @@ public static Iterable parameters() { ), new TestCaseSupplier( "pow(NaN, 0)", - () -> new TestCase( - List.of(new TypedData(Double.NaN, DataTypes.DOUBLE, "base"), new TypedData(0d, DataTypes.DOUBLE, "exp")), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(Double.NaN, DataTypes.DOUBLE, "base"), + new TestCaseSupplier.TypedData(0d, DataTypes.DOUBLE, "exp") + ), "PowDoubleEvaluator[base=Attribute[channel=0], exponent=Attribute[channel=1]]", DataTypes.DOUBLE, equalTo(1d) @@ -70,8 +83,11 @@ public static Iterable parameters() { ), new TestCaseSupplier( "pow(0, 0)", - () -> new TestCase( - List.of(new TypedData(0d, DataTypes.DOUBLE, "base"), new TypedData(0d, DataTypes.DOUBLE, "exp")), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(0d, DataTypes.DOUBLE, "base"), + new TestCaseSupplier.TypedData(0d, DataTypes.DOUBLE, "exp") + ), "PowDoubleEvaluator[base=Attribute[channel=0], exponent=Attribute[channel=1]]", DataTypes.DOUBLE, equalTo(1d) @@ -79,8 +95,11 @@ public static Iterable parameters() { ), new TestCaseSupplier( "pow(1, 1)", - () -> new TestCase( - List.of(new TypedData(1, DataTypes.INTEGER, "base"), new TypedData(1, DataTypes.INTEGER, "base")), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(1, DataTypes.INTEGER, "base"), + new TestCaseSupplier.TypedData(1, DataTypes.INTEGER, "base") + ), "PowIntEvaluator[base=CastIntToDoubleEvaluator[v=Attribute[channel=0]], " + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", DataTypes.INTEGER, @@ -89,10 +108,10 @@ public static Iterable parameters() { ), new TestCaseSupplier( "pow(integer, 0)", - () -> new TestCase( + () -> new TestCaseSupplier.TestCase( List.of( - new TypedData(randomValueOtherThan(0, ESTestCase::randomInt), DataTypes.INTEGER, "base"), - new TypedData(0, DataTypes.INTEGER, "exp") + new TestCaseSupplier.TypedData(randomValueOtherThan(0, ESTestCase::randomInt), DataTypes.INTEGER, "base"), + new TestCaseSupplier.TypedData(0, DataTypes.INTEGER, "exp") ), "PowIntEvaluator[base=CastIntToDoubleEvaluator[v=Attribute[channel=0]], " + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", @@ -102,8 
+121,11 @@ public static Iterable parameters() { ), new TestCaseSupplier("pow(integer, 2)", () -> { int base = randomIntBetween(-1000, 1000); - return new TestCase( - List.of(new TypedData(base, DataTypes.INTEGER, "base"), new TypedData(2, DataTypes.INTEGER, "exp")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(base, DataTypes.INTEGER, "base"), + new TestCaseSupplier.TypedData(2, DataTypes.INTEGER, "exp") + ), "PowIntEvaluator[base=CastIntToDoubleEvaluator[v=Attribute[channel=0]], " + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", DataTypes.INTEGER, @@ -112,8 +134,11 @@ public static Iterable parameters() { }), new TestCaseSupplier( "integer overflow case", - () -> new TestCase( - List.of(new TypedData(Integer.MAX_VALUE, DataTypes.INTEGER, "base"), new TypedData(2, DataTypes.INTEGER, "exp")), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(Integer.MAX_VALUE, DataTypes.INTEGER, "base"), + new TestCaseSupplier.TypedData(2, DataTypes.INTEGER, "exp") + ), "PowIntEvaluator[base=CastIntToDoubleEvaluator[v=Attribute[channel=0]], " + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", DataTypes.INTEGER, @@ -123,8 +148,11 @@ public static Iterable parameters() { ), new TestCaseSupplier( "long overflow case", - () -> new TestCase( - List.of(new TypedData(Long.MAX_VALUE, DataTypes.LONG, "base"), new TypedData(2, DataTypes.INTEGER, "exp")), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(Long.MAX_VALUE, DataTypes.LONG, "base"), + new TestCaseSupplier.TypedData(2, DataTypes.INTEGER, "exp") + ), "PowLongEvaluator[base=CastLongToDoubleEvaluator[v=Attribute[channel=0]], " + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]", DataTypes.LONG, @@ -134,8 +162,11 @@ public static Iterable parameters() { ), new TestCaseSupplier( "pow(2, 0.5) == sqrt(2)", - () -> new TestCase( - List.of(new TypedData(2, DataTypes.INTEGER, "base"), new TypedData(0.5, DataTypes.DOUBLE, "exp")), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(2, DataTypes.INTEGER, "base"), + new TestCaseSupplier.TypedData(0.5, DataTypes.DOUBLE, "exp") + ), "PowDoubleEvaluator[base=CastIntToDoubleEvaluator[v=Attribute[channel=0]], exponent=Attribute[channel=1]]", DataTypes.DOUBLE, equalTo(Math.sqrt(2)) @@ -143,8 +174,11 @@ public static Iterable parameters() { ), new TestCaseSupplier( "pow(2.0, 0.5) == sqrt(2)", - () -> new TestCase( - List.of(new TypedData(2d, DataTypes.DOUBLE, "base"), new TypedData(0.5, DataTypes.DOUBLE, "exp")), + () -> new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(2d, DataTypes.DOUBLE, "base"), + new TestCaseSupplier.TypedData(0.5, DataTypes.DOUBLE, "exp") + ), "PowDoubleEvaluator[base=Attribute[channel=0], exponent=Attribute[channel=1]]", DataTypes.DOUBLE, equalTo(Math.sqrt(2)) @@ -155,8 +189,11 @@ public static Iterable parameters() { int base = randomIntBetween(0, 1000); double exp = randomDoubleBetween(-10.0, 10.0, true); double expected = Math.pow(base, exp); - TestCase testCase = new TestCase( - List.of(new TypedData(base, DataTypes.INTEGER, "base"), new TypedData(exp, DataTypes.DOUBLE, "exp")), + TestCaseSupplier.TestCase testCase = new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(base, DataTypes.INTEGER, "base"), + new TestCaseSupplier.TypedData(exp, DataTypes.DOUBLE, "exp") + ), "PowDoubleEvaluator[base=CastIntToDoubleEvaluator[v=Attribute[channel=0]], exponent=Attribute[channel=1]]", 
                     DataTypes.DOUBLE,
                     equalTo(expected)
@@ -167,8 +204,11 @@ public static Iterable<Object[]> parameters() {
                 // Negative numbers to a non-integer power are NaN
                 int base = randomIntBetween(-1000, -1);
                 double exp = randomDouble(); // between 0 and 1
-                TestCase testCase = new TestCase(
-                    List.of(new TypedData(base, DataTypes.INTEGER, "base"), new TypedData(exp, DataTypes.DOUBLE, "exp")),
+                TestCaseSupplier.TestCase testCase = new TestCaseSupplier.TestCase(
+                    List.of(
+                        new TestCaseSupplier.TypedData(base, DataTypes.INTEGER, "base"),
+                        new TestCaseSupplier.TypedData(exp, DataTypes.DOUBLE, "exp")
+                    ),
                     "PowDoubleEvaluator[base=CastIntToDoubleEvaluator[v=Attribute[channel=0]], exponent=Attribute[channel=1]]",
                     DataTypes.DOUBLE,
                     equalTo(null)
@@ -178,8 +218,11 @@ public static Iterable<Object[]> parameters() {
             }),
             new TestCaseSupplier(
                 "pow(123, -1)",
-                () -> new TestCase(
-                    List.of(new TypedData(123, DataTypes.INTEGER, "base"), new TypedData(-1, DataTypes.INTEGER, "exp")),
+                () -> new TestCaseSupplier.TestCase(
+                    List.of(
+                        new TestCaseSupplier.TypedData(123, DataTypes.INTEGER, "base"),
+                        new TestCaseSupplier.TypedData(-1, DataTypes.INTEGER, "exp")
+                    ),
                     "PowIntEvaluator[base=CastIntToDoubleEvaluator[v=Attribute[channel=0]], "
                         + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]",
                     DataTypes.INTEGER,
@@ -188,8 +231,11 @@ public static Iterable<Object[]> parameters() {
             ),
             new TestCaseSupplier(
                 "pow(123L, -1)",
-                () -> new TestCase(
-                    List.of(new TypedData(123L, DataTypes.LONG, "base"), new TypedData(-1, DataTypes.INTEGER, "exp")),
+                () -> new TestCaseSupplier.TestCase(
+                    List.of(
+                        new TestCaseSupplier.TypedData(123L, DataTypes.LONG, "base"),
+                        new TestCaseSupplier.TypedData(-1, DataTypes.INTEGER, "exp")
+                    ),
                     "PowLongEvaluator[base=CastLongToDoubleEvaluator[v=Attribute[channel=0]], "
                         + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]",
                     DataTypes.LONG,
@@ -198,8 +244,11 @@ public static Iterable<Object[]> parameters() {
             ),
             new TestCaseSupplier(
                 "pow(123D, -1)",
-                () -> new TestCase(
-                    List.of(new TypedData(123.0, DataTypes.DOUBLE, "base"), new TypedData(-1, DataTypes.INTEGER, "exp")),
+                () -> new TestCaseSupplier.TestCase(
+                    List.of(
+                        new TestCaseSupplier.TypedData(123.0, DataTypes.DOUBLE, "base"),
+                        new TestCaseSupplier.TypedData(-1, DataTypes.INTEGER, "exp")
+                    ),
                     "PowDoubleEvaluator[base=Attribute[channel=0], exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]",
                     DataTypes.DOUBLE,
                     equalTo(1D / 123D)
@@ -207,8 +256,11 @@ public static Iterable<Object[]> parameters() {
             ),
             new TestCaseSupplier("pow(integer, 1)", () -> {
                 int base = randomInt();
-                return new TestCase(
-                    List.of(new TypedData(base, DataTypes.INTEGER, "base"), new TypedData(1, DataTypes.INTEGER, "exp")),
+                return new TestCaseSupplier.TestCase(
+                    List.of(
+                        new TestCaseSupplier.TypedData(base, DataTypes.INTEGER, "base"),
+                        new TestCaseSupplier.TypedData(1, DataTypes.INTEGER, "exp")
+                    ),
                     "PowIntEvaluator[base=CastIntToDoubleEvaluator[v=Attribute[channel=0]], "
                         + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]",
                     DataTypes.INTEGER,
@@ -217,8 +269,11 @@ public static Iterable<Object[]> parameters() {
             }),
             new TestCaseSupplier(
                 "pow(1L, 1)",
-                () -> new TestCase(
-                    List.of(new TypedData(1L, DataTypes.LONG, "base"), new TypedData(1, DataTypes.INTEGER, "exp")),
+                () -> new TestCaseSupplier.TestCase(
+                    List.of(
+                        new TestCaseSupplier.TypedData(1L, DataTypes.LONG, "base"),
+                        new TestCaseSupplier.TypedData(1, DataTypes.INTEGER, "exp")
+                    ),
                     "PowLongEvaluator[base=CastLongToDoubleEvaluator[v=Attribute[channel=0]], "
                         + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]",
                     DataTypes.LONG,
@@ -228,8 +283,11 @@ public static Iterable<Object[]> parameters() {
             new TestCaseSupplier("pow(long, 1)", () -> {
                 // Avoid double precision loss
                 long base = randomLongBetween(-1L << 51, 1L << 51);
-                return new TestCase(
-                    List.of(new TypedData(base, DataTypes.LONG, "base"), new TypedData(1, DataTypes.INTEGER, "exp")),
+                return new TestCaseSupplier.TestCase(
+                    List.of(
+                        new TestCaseSupplier.TypedData(base, DataTypes.LONG, "base"),
+                        new TestCaseSupplier.TypedData(1, DataTypes.INTEGER, "exp")
+                    ),
                     "PowLongEvaluator[base=CastLongToDoubleEvaluator[v=Attribute[channel=0]], "
                         + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]",
                     DataTypes.LONG,
@@ -239,8 +297,11 @@ public static Iterable<Object[]> parameters() {
             new TestCaseSupplier("long-double overflow", () -> {
                 long base = 4339622345450989181L; // Not exactly representable as a double
                 long expected = 4339622345450989056L;
-                return new TestCase(
-                    List.of(new TypedData(base, DataTypes.LONG, "base"), new TypedData(1, DataTypes.INTEGER, "exp")),
+                return new TestCaseSupplier.TestCase(
+                    List.of(
+                        new TestCaseSupplier.TypedData(base, DataTypes.LONG, "base"),
+                        new TestCaseSupplier.TypedData(1, DataTypes.INTEGER, "exp")
+                    ),
                     "PowLongEvaluator[base=CastLongToDoubleEvaluator[v=Attribute[channel=0]], "
                         + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]",
                     DataTypes.LONG,
@@ -249,8 +310,11 @@ public static Iterable<Object[]> parameters() {
             }),
             new TestCaseSupplier("pow(long, 0)", () -> {
                 long base = randomLong();
-                return new TestCase(
-                    List.of(new TypedData(base, DataTypes.LONG, "base"), new TypedData(0, DataTypes.INTEGER, "exp")),
+                return new TestCaseSupplier.TestCase(
+                    List.of(
+                        new TestCaseSupplier.TypedData(base, DataTypes.LONG, "base"),
+                        new TestCaseSupplier.TypedData(0, DataTypes.INTEGER, "exp")
+                    ),
                     "PowLongEvaluator[base=CastLongToDoubleEvaluator[v=Attribute[channel=0]], "
                         + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]",
                     DataTypes.LONG,
@@ -259,8 +323,11 @@ public static Iterable<Object[]> parameters() {
             }),
             new TestCaseSupplier("pow(long, 2)", () -> {
                 long base = randomLongBetween(-1000, 1000);
-                return new TestCase(
-                    List.of(new TypedData(base, DataTypes.LONG, "base"), new TypedData(2, DataTypes.INTEGER, "exp")),
+                return new TestCaseSupplier.TestCase(
+                    List.of(
+                        new TestCaseSupplier.TypedData(base, DataTypes.LONG, "base"),
+                        new TestCaseSupplier.TypedData(2, DataTypes.INTEGER, "exp")
+                    ),
                     "PowLongEvaluator[base=CastLongToDoubleEvaluator[v=Attribute[channel=0]], "
                         + "exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]",
                     DataTypes.LONG,
@@ -272,8 +339,11 @@ public static Iterable<Object[]> parameters() {
                 long base = randomLongBetween(0, 1000);
                 double exp = randomDoubleBetween(-10.0, 10.0, true);
                 double expected = Math.pow(base, exp);
-                TestCase testCase = new TestCase(
-                    List.of(new TypedData(base, DataTypes.LONG, "base"), new TypedData(exp, DataTypes.DOUBLE, "exp")),
+                TestCaseSupplier.TestCase testCase = new TestCaseSupplier.TestCase(
+                    List.of(
+                        new TestCaseSupplier.TypedData(base, DataTypes.LONG, "base"),
+                        new TestCaseSupplier.TypedData(exp, DataTypes.DOUBLE, "exp")
+                    ),
                     "PowDoubleEvaluator[base=CastLongToDoubleEvaluator[v=Attribute[channel=0]], exponent=Attribute[channel=1]]",
                     DataTypes.DOUBLE,
                     equalTo(expected)
@@ -282,8 +352,11 @@ public static Iterable<Object[]> parameters() {
             }),
             new TestCaseSupplier(
                 "pow(1D, 1)",
-                () -> new TestCase(
-                    List.of(new TypedData(1D, DataTypes.DOUBLE, "base"), new TypedData(1, DataTypes.INTEGER, "exp")),
+                () -> new TestCaseSupplier.TestCase(
+                    List.of(
+                        new TestCaseSupplier.TypedData(1D, DataTypes.DOUBLE, "base"),
+                        new TestCaseSupplier.TypedData(1, DataTypes.INTEGER, "exp")
+                    ),
                     "PowDoubleEvaluator[base=Attribute[channel=0], exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]",
                     DataTypes.DOUBLE,
                     equalTo(1D)
@@ -297,8 +370,11 @@ public static Iterable<Object[]> parameters() {
                     // Sometimes pick a large number
                     base = 1 / randomDouble();
                 }
-                return new TestCase(
-                    List.of(new TypedData(base, DataTypes.DOUBLE, "base"), new TypedData(1, DataTypes.INTEGER, "exp")),
+                return new TestCaseSupplier.TestCase(
+                    List.of(
+                        new TestCaseSupplier.TypedData(base, DataTypes.DOUBLE, "base"),
+                        new TestCaseSupplier.TypedData(1, DataTypes.INTEGER, "exp")
+                    ),
                     "PowDoubleEvaluator[base=Attribute[channel=0], exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]",
                     DataTypes.DOUBLE,
                     equalTo(base)
@@ -312,8 +388,11 @@ public static Iterable<Object[]> parameters() {
                     // Sometimes pick a large number
                     base = 1 / randomDouble();
                 }
-                return new TestCase(
-                    List.of(new TypedData(base, DataTypes.DOUBLE, "base"), new TypedData(0, DataTypes.INTEGER, "exp")),
+                return new TestCaseSupplier.TestCase(
+                    List.of(
+                        new TestCaseSupplier.TypedData(base, DataTypes.DOUBLE, "base"),
+                        new TestCaseSupplier.TypedData(0, DataTypes.INTEGER, "exp")
+                    ),
                     "PowDoubleEvaluator[base=Attribute[channel=0], exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]",
                     DataTypes.DOUBLE,
                     equalTo(1D)
@@ -321,8 +400,11 @@ public static Iterable<Object[]> parameters() {
             }),
             new TestCaseSupplier("pow(double, 2)", () -> {
                 double base = randomDoubleBetween(-1000, 1000, true);
-                return new TestCase(
-                    List.of(new TypedData(base, DataTypes.DOUBLE, "base"), new TypedData(2, DataTypes.INTEGER, "exp")),
+                return new TestCaseSupplier.TestCase(
+                    List.of(
+                        new TestCaseSupplier.TypedData(base, DataTypes.DOUBLE, "base"),
+                        new TestCaseSupplier.TypedData(2, DataTypes.INTEGER, "exp")
+                    ),
                     "PowDoubleEvaluator[base=Attribute[channel=0], exponent=CastIntToDoubleEvaluator[v=Attribute[channel=1]]]",
                     DataTypes.DOUBLE,
                     equalTo(Math.pow(base, 2))
@@ -332,8 +414,11 @@ public static Iterable<Object[]> parameters() {
                 // Negative numbers to a non-integer power are NaN
                 double base = randomDoubleBetween(0, 1000, true);
                 double exp = randomDoubleBetween(-10.0, 10.0, true);
-                TestCase testCase = new TestCase(
-                    List.of(new TypedData(base, DataTypes.DOUBLE, "base"), new TypedData(exp, DataTypes.DOUBLE, "exp")),
+                TestCaseSupplier.TestCase testCase = new TestCaseSupplier.TestCase(
+                    List.of(
+                        new TestCaseSupplier.TypedData(base, DataTypes.DOUBLE, "base"),
+                        new TestCaseSupplier.TypedData(exp, DataTypes.DOUBLE, "exp")
+                    ),
                     "PowDoubleEvaluator[base=Attribute[channel=0], exponent=Attribute[channel=1]]",
                     DataTypes.DOUBLE,
                     equalTo(Math.pow(base, exp))
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundTests.java
index 385b42d23a177..f9e8886f960ad 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/RoundTests.java
@@ -10,6 +10,7 @@
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.expression.predicate.operator.math.Maths;
@@ -24,7 +25,7 @@
 import static org.hamcrest.Matchers.equalTo;
 
 public class RoundTests extends AbstractScalarFunctionTestCase {
-    public RoundTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public RoundTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
@@ -33,8 +34,11 @@ public static Iterable<Object[]> parameters() {
         return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("round(<double>, <int>)", () -> {
             double number = 1 / randomDouble();
             int precision = between(-30, 30);
-            return new TestCase(
-                List.of(new TypedData(number, DataTypes.DOUBLE, "number"), new TypedData(precision, DataTypes.INTEGER, "precision")),
+            return new TestCaseSupplier.TestCase(
+                List.of(
+                    new TestCaseSupplier.TypedData(number, DataTypes.DOUBLE, "number"),
+                    new TestCaseSupplier.TypedData(precision, DataTypes.INTEGER, "precision")
+                ),
                 "RoundDoubleEvaluator[val=Attribute[channel=0], decimals=CastIntToLongEvaluator[v=Attribute[channel=1]]]",
                 DataTypes.DOUBLE,
                 equalTo(Maths.round(number, precision))
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinTests.java
index c5ad76afc95b3..e04d1fb9b3c80 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinTests.java
@@ -11,6 +11,7 @@
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
 import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.Source;
 
@@ -18,7 +19,7 @@
 import java.util.function.Supplier;
 
 public class SinTests extends AbstractFunctionTestCase {
-    public SinTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public SinTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java
index 163209d1b32ee..4e51a424aa21f 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SinhTests.java
@@ -11,6 +11,7 @@
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
 import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.Source;
 
@@ -18,7 +19,7 @@
 import java.util.function.Supplier;
 
 public class SinhTests extends AbstractFunctionTestCase {
-    public SinhTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public SinhTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtTests.java
index e328f38f1b64c..348360d9a395f 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/SqrtTests.java
@@ -11,16 +11,20 @@
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
 import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.Source;
 import org.elasticsearch.xpack.ql.type.DataTypes;
 
+import java.math.BigInteger;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.function.Supplier;
 
+import static org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier.MAX_UNSIGNED_LONG;
+
 public class SqrtTests extends AbstractFunctionTestCase {
-    public SqrtTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public SqrtTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
@@ -28,15 +32,38 @@ public SqrtTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
     public static Iterable<Object[]> parameters() {
         String read = "Attribute[channel=0]";
         List<TestCaseSupplier> suppliers = new ArrayList<>();
-        TestCaseSupplier.forUnaryInt(suppliers, "SqrtIntEvaluator[val=" + read + "]", DataTypes.DOUBLE, Math::sqrt);
-        TestCaseSupplier.forUnaryLong(suppliers, "SqrtLongEvaluator[val=" + read + "]", DataTypes.DOUBLE, Math::sqrt);
+        TestCaseSupplier.forUnaryInt(
+            suppliers,
+            "SqrtIntEvaluator[val=" + read + "]",
+            DataTypes.DOUBLE,
+            Math::sqrt,
+            Integer.MIN_VALUE,
+            Integer.MAX_VALUE
+        );
+        TestCaseSupplier.forUnaryLong(
+            suppliers,
+            "SqrtLongEvaluator[val=" + read + "]",
+            DataTypes.DOUBLE,
+            Math::sqrt,
+            Long.MIN_VALUE,
+            Long.MAX_VALUE
+        );
         TestCaseSupplier.forUnaryUnsignedLong(
             suppliers,
             "SqrtUnsignedLongEvaluator[val=" + read + "]",
             DataTypes.DOUBLE,
-            ul -> Math.sqrt(ul.doubleValue())
+            ul -> Math.sqrt(ul.doubleValue()),
+            BigInteger.ZERO,
+            MAX_UNSIGNED_LONG
+        );
+        TestCaseSupplier.forUnaryDouble(
+            suppliers,
+            "SqrtDoubleEvaluator[val=" + read + "]",
+            DataTypes.DOUBLE,
+            Math::sqrt,
+            Double.NEGATIVE_INFINITY,
+            Double.POSITIVE_INFINITY
         );
-        TestCaseSupplier.forUnaryDouble(suppliers, "SqrtDoubleEvaluator[val=" + read + "]", DataTypes.DOUBLE, Math::sqrt);
         return parameterSuppliersFromTypedData(errorsForCasesWithoutExamples(anyNullIsNull(true, suppliers)));
     }
 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanTests.java
index c98351fc73051..afb7e4c695228 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanTests.java
@@ -11,6 +11,7 @@
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
 import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.Source;
@@ -18,7 +19,7 @@
 import java.util.function.Supplier;
 
 public class TanTests extends AbstractFunctionTestCase {
-    public TanTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public TanTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhTests.java
index 8338950815bfc..a86dfc0b6efee 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TanhTests.java
@@ -11,6 +11,7 @@
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
 import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.Source;
 
@@ -18,7 +19,7 @@
 import java.util.function.Supplier;
 
 public class TanhTests extends AbstractFunctionTestCase {
-    public TanhTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public TanhTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TauTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TauTests.java
index 25560533898fe..ae07e73309379 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TauTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/math/TauTests.java
@@ -12,6 +12,7 @@
 
 import org.elasticsearch.compute.data.Block;
 import org.elasticsearch.compute.data.DoubleBlock;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.Source;
@@ -24,15 +25,15 @@
 import static org.hamcrest.Matchers.equalTo;
 
 public class TauTests extends AbstractScalarFunctionTestCase {
-    public TauTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public TauTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
     @ParametersFactory
     public static Iterable<Object[]> parameters() {
         return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Tau Test", () -> {
-            return new TestCase(
-                List.of(new TypedData(1, DataTypes.INTEGER, "foo")),
+            return new TestCaseSupplier.TestCase(
+                List.of(new TestCaseSupplier.TypedData(1, DataTypes.INTEGER, "foo")),
                 "LiteralsEvaluator[block=6.283185307179586]",
                 DataTypes.DOUBLE,
                 equalTo(Tau.TAU)
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java
index 4d4d658727d70..0f48da5d80f72 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/AbstractMultivalueFunctionTestCase.java
@@ -14,6 +14,7 @@
 import org.elasticsearch.compute.data.Page;
 import org.elasticsearch.compute.data.Vector;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase;
 import org.elasticsearch.xpack.esql.planner.LocalExecutionPlanner;
 import org.elasticsearch.xpack.ql.expression.Expression;
@@ -61,8 +62,8 @@ protected static void booleans(
         cases.add(
             new TestCaseSupplier(
                 name + "(false)",
-                () -> new TestCase(
-                    List.of(new TypedData(List.of(false), DataTypes.BOOLEAN, "field")),
+                () -> new TestCaseSupplier.TestCase(
+                    List.of(new TestCaseSupplier.TypedData(List.of(false), DataTypes.BOOLEAN, "field")),
                     evaluatorName + "[field=Attribute[channel=0]]",
                     expectedDataType,
                     matcher.apply(1, Stream.of(false))
@@ -72,8 +73,8 @@ protected static void booleans(
         cases.add(
             new TestCaseSupplier(
                 name + "(true)",
-                () -> new TestCase(
-                    List.of(new TypedData(List.of(true), DataTypes.BOOLEAN, "field")),
+                () -> new TestCaseSupplier.TestCase(
+                    List.of(new TestCaseSupplier.TypedData(List.of(true), DataTypes.BOOLEAN, "field")),
                     evaluatorName + "[field=Attribute[channel=0]]",
                     expectedDataType,
                     matcher.apply(1, Stream.of(true))
@@ -84,8 +85,8 @@ protected static void booleans(
             cases.add(new TestCaseSupplier(name + "() " + ordering, () -> {
                 List<Boolean> mvData = randomList(2, 100, ESTestCase::randomBoolean);
                 putInOrder(mvData, ordering);
-                return new TestCase(
-                    List.of(new TypedData(mvData, DataTypes.BOOLEAN, "field")),
+                return new TestCaseSupplier.TestCase(
+                    List.of(new TestCaseSupplier.TypedData(mvData, DataTypes.BOOLEAN, "field")),
                     evaluatorName + "[field=Attribute[channel=0]]",
                     expectedDataType,
                     matcher.apply(mvData.size(), mvData.stream())
@@ -119,8 +120,8 @@ protected static void bytesRefs(
         cases.add(
             new TestCaseSupplier(
                 name + "(\"\")",
-                () -> new TestCase(
-                    List.of(new TypedData(List.of(new BytesRef("")), DataTypes.KEYWORD, "field")),
+                () -> new TestCaseSupplier.TestCase(
+                    List.of(new TestCaseSupplier.TypedData(List.of(new BytesRef("")), DataTypes.KEYWORD, "field")),
                     evaluatorName + "[field=Attribute[channel=0]]",
                     expectedDataType,
                     matcher.apply(1, Stream.of(new BytesRef("")))
@@ -129,8 +130,8 @@ protected static void bytesRefs(
         );
         cases.add(new TestCaseSupplier(name + "(BytesRef)", () -> {
             BytesRef data = new BytesRef(randomAlphaOfLength(10));
-            return new TestCase(
-                List.of(new TypedData(List.of(data), DataTypes.KEYWORD, "field")),
+            return new TestCaseSupplier.TestCase(
+                List.of(new TestCaseSupplier.TypedData(List.of(data), DataTypes.KEYWORD, "field")),
                 evaluatorName + "[field=Attribute[channel=0]]",
                 expectedDataType,
                 matcher.apply(1, Stream.of(data))
@@ -140,8 +141,8 @@ protected static void bytesRefs(
             cases.add(new TestCaseSupplier(name + "() " + ordering, () -> {
                 List<BytesRef> mvData = randomList(1, 100, () -> new BytesRef(randomAlphaOfLength(10)));
                 putInOrder(mvData, ordering);
-                return new TestCase(
-                    List.of(new TypedData(mvData, DataTypes.KEYWORD, "field")),
+                return new TestCaseSupplier.TestCase(
+                    List.of(new TestCaseSupplier.TypedData(mvData, DataTypes.KEYWORD, "field")),
                     evaluatorName + "[field=Attribute[channel=0]]",
                     expectedDataType,
                     matcher.apply(mvData.size(), mvData.stream())
@@ -175,8 +176,8 @@ protected static void doubles(
         cases.add(
             new TestCaseSupplier(
                 name + "(0.0)",
-                () -> new TestCase(
-                    List.of(new TypedData(List.of(0.0), DataTypes.DOUBLE, "field")),
+                () -> new TestCaseSupplier.TestCase(
+                    List.of(new TestCaseSupplier.TypedData(List.of(0.0), DataTypes.DOUBLE, "field")),
                     evaluatorName + "[field=Attribute[channel=0]]",
                     expectedDataType,
                     matcher.apply(1, DoubleStream.of(0.0))
@@ -185,8 +186,8 @@ protected static void doubles(
         );
         cases.add(new TestCaseSupplier(name + "(double)", () -> {
             double mvData = randomDouble();
-            return new TestCase(
-                List.of(new TypedData(List.of(mvData), DataTypes.DOUBLE, "field")),
+            return new TestCaseSupplier.TestCase(
+                List.of(new TestCaseSupplier.TypedData(List.of(mvData), DataTypes.DOUBLE, "field")),
                 evaluatorName + "[field=Attribute[channel=0]]",
                 expectedDataType,
                 matcher.apply(1, DoubleStream.of(mvData))
@@ -196,8 +197,8 @@ protected static void doubles(
             cases.add(new TestCaseSupplier(name + "() " + ordering, () -> {
                 List<Double> mvData = randomList(1, 100, ESTestCase::randomDouble);
                 putInOrder(mvData, ordering);
-                return new TestCase(
-                    List.of(new TypedData(mvData, DataTypes.DOUBLE, "field")),
+                return new TestCaseSupplier.TestCase(
+                    List.of(new TestCaseSupplier.TypedData(mvData, DataTypes.DOUBLE, "field")),
                     evaluatorName + "[field=Attribute[channel=0]]",
                     expectedDataType,
                     matcher.apply(mvData.size(), mvData.stream().mapToDouble(Double::doubleValue))
@@ -231,8 +232,8 @@ protected static void ints(
         cases.add(
             new TestCaseSupplier(
                 name + "(0)",
-                () -> new TestCase(
-                    List.of(new TypedData(List.of(0), DataTypes.INTEGER, "field")),
+                () -> new TestCaseSupplier.TestCase(
+                    List.of(new TestCaseSupplier.TypedData(List.of(0), DataTypes.INTEGER, "field")),
                     evaluatorName + "[field=Attribute[channel=0]]",
                     expectedDataType,
                     matcher.apply(1, IntStream.of(0))
@@ -241,8 +242,8 @@ protected static void ints(
         );
         cases.add(new TestCaseSupplier(name + "(int)", () -> {
             int data = randomInt();
-            return new TestCase(
-                List.of(new TypedData(List.of(data), DataTypes.INTEGER, "field")),
+            return new TestCaseSupplier.TestCase(
+                List.of(new TestCaseSupplier.TypedData(List.of(data), DataTypes.INTEGER, "field")),
                 evaluatorName + "[field=Attribute[channel=0]]",
                 expectedDataType,
                 matcher.apply(1, IntStream.of(data))
@@ -252,8 +253,8 @@ protected static void ints(
            cases.add(new TestCaseSupplier(name + "() " + ordering, () -> {
                 List<Integer> mvData = randomList(1, 100, ESTestCase::randomInt);
                 putInOrder(mvData, ordering);
-                return new TestCase(
-                    List.of(new TypedData(mvData, DataTypes.INTEGER, "field")),
+                return new TestCaseSupplier.TestCase(
+                    List.of(new TestCaseSupplier.TypedData(mvData, DataTypes.INTEGER, "field")),
                     evaluatorName + "[field=Attribute[channel=0]]",
                     expectedDataType,
                     matcher.apply(mvData.size(), mvData.stream().mapToInt(Integer::intValue))
@@ -287,8 +288,8 @@ protected static void longs(
         cases.add(
             new TestCaseSupplier(
                 name + "(0L)",
-                () -> new TestCase(
-                    List.of(new TypedData(List.of(0L), DataTypes.LONG, "field")),
+                () -> new TestCaseSupplier.TestCase(
+                    List.of(new TestCaseSupplier.TypedData(List.of(0L), DataTypes.LONG, "field")),
                     evaluatorName + "[field=Attribute[channel=0]]",
                     expectedDataType,
                     matcher.apply(1, LongStream.of(0L))
@@ -297,8 +298,8 @@ protected static void longs(
         );
         cases.add(new TestCaseSupplier(name + "(long)", () -> {
             long data = randomLong();
-            return new TestCase(
-                List.of(new TypedData(List.of(data), DataTypes.LONG, "field")),
+            return new TestCaseSupplier.TestCase(
+                List.of(new TestCaseSupplier.TypedData(List.of(data), DataTypes.LONG, "field")),
                 evaluatorName + "[field=Attribute[channel=0]]",
                 expectedDataType,
                 matcher.apply(1, LongStream.of(data))
@@ -308,8 +309,8 @@ protected static void longs(
             cases.add(new TestCaseSupplier(name + "() " + ordering, () -> {
                 List<Long> mvData = randomList(1, 100, ESTestCase::randomLong);
                 putInOrder(mvData, ordering);
-                return new TestCase(
-                    List.of(new TypedData(mvData, DataTypes.LONG, "field")),
+                return new TestCaseSupplier.TestCase(
+                    List.of(new TestCaseSupplier.TypedData(mvData, DataTypes.LONG, "field")),
                     evaluatorName + "[field=Attribute[channel=0]]",
                     expectedDataType,
                     matcher.apply(mvData.size(), mvData.stream().mapToLong(Long::longValue))
@@ -343,8 +344,14 @@ protected static void unsignedLongs(
         cases.add(
             new TestCaseSupplier(
                 name + "(0UL)",
-                () -> new TestCase(
-                    List.of(new TypedData(List.of(NumericUtils.asLongUnsigned(BigInteger.ZERO)), DataTypes.UNSIGNED_LONG, "field")),
+                () -> new TestCaseSupplier.TestCase(
+                    List.of(
+                        new TestCaseSupplier.TypedData(
+                            List.of(NumericUtils.asLongUnsigned(BigInteger.ZERO)),
+                            DataTypes.UNSIGNED_LONG,
+                            "field"
+                        )
+                    ),
                     evaluatorName + "[field=Attribute[channel=0]]",
                     expectedDataType,
                     matcher.apply(1, Stream.of(BigInteger.ZERO))
@@ -353,8 +360,8 @@ protected static void unsignedLongs(
         );
         cases.add(new TestCaseSupplier(name + "(unsigned long)", () -> {
             long data = randomLong();
-            return new TestCase(
-                List.of(new TypedData(List.of(data), DataTypes.UNSIGNED_LONG, "field")),
+            return new TestCaseSupplier.TestCase(
+                List.of(new TestCaseSupplier.TypedData(List.of(data), DataTypes.UNSIGNED_LONG, "field")),
                 evaluatorName + "[field=Attribute[channel=0]]",
                 expectedDataType,
                 matcher.apply(1, Stream.of(NumericUtils.unsignedLongAsBigInteger(data)))
@@ -364,8 +371,8 @@ protected static void unsignedLongs(
             cases.add(new TestCaseSupplier(name + "() " + ordering, () -> {
                 List<Long> mvData = randomList(1, 100, ESTestCase::randomLong);
                 putInOrder(mvData, ordering);
-                return new TestCase(
-                    List.of(new TypedData(mvData, DataTypes.UNSIGNED_LONG, "field")),
+                return new TestCaseSupplier.TestCase(
+                    List.of(new TestCaseSupplier.TypedData(mvData, DataTypes.UNSIGNED_LONG, "field")),
                     evaluatorName + "[field=Attribute[channel=0]]",
                     expectedDataType,
                     matcher.apply(mvData.size(), mvData.stream().map(NumericUtils::unsignedLongAsBigInteger))
@@ -423,7 +430,7 @@ public final void testBlockWithNulls() {
 
     private void testBlock(boolean insertNulls) {
         int positions = between(1, 1024);
-        TypedData data = testCase.getData().get(0);
+        TestCaseSupplier.TypedData data = testCase.getData().get(0);
         Block oneRowBlock = BlockUtils.fromListRow(testCase.getDataValues())[0];
         ElementType elementType = LocalExecutionPlanner.toElementType(data.type());
         Block.Builder builder = elementType.newBlockBuilder(positions);
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgTests.java
index e08edf17aa47f..b0e459164de71 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvAvgTests.java
@@ -11,6 +11,7 @@
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
 import org.elasticsearch.search.aggregations.metrics.CompensatedSum;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.Source;
 import org.elasticsearch.xpack.ql.type.DataType;
@@ -27,7 +28,7 @@
 import static org.hamcrest.Matchers.equalTo;
 
 public class MvAvgTests extends AbstractMultivalueFunctionTestCase {
-    public MvAvgTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public MvAvgTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatTests.java
index 5fda4979e176b..4bd6403c98e2a 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvConcatTests.java
@@ -11,6 +11,7 @@
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.Source;
@@ -26,17 +27,21 @@
 import static org.hamcrest.Matchers.nullValue;
 
 public class MvConcatTests extends AbstractScalarFunctionTestCase {
-    public MvConcatTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public MvConcatTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
     @ParametersFactory
     public static Iterable<Object[]> parameters() {
         return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("mv_concat basic test", () -> {
-            return new TestCase(
+            return new TestCaseSupplier.TestCase(
                 List.of(
-                    new TypedData(List.of(new BytesRef("foo"), new BytesRef("bar"), new BytesRef("baz")), DataTypes.KEYWORD, "field"),
-                    new TypedData(new BytesRef(", "), DataTypes.KEYWORD, "delim")
+                    new TestCaseSupplier.TypedData(
+                        List.of(new BytesRef("foo"), new BytesRef("bar"), new BytesRef("baz")),
+                        DataTypes.KEYWORD,
+                        "field"
+                    ),
+                    new TestCaseSupplier.TypedData(new BytesRef(", "), DataTypes.KEYWORD, "delim")
                 ),
                 "MvConcat[field=Attribute[channel=0], delim=Attribute[channel=1]]",
                 DataTypes.KEYWORD,
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java
index baa0332bf6024..a13a43bdee75c 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvCountTests.java
@@ -10,6 +10,7 @@
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.Source;
 import org.elasticsearch.xpack.ql.type.DataType;
@@ -22,7 +23,7 @@
 import static org.hamcrest.Matchers.equalTo;
 
 public class MvCountTests extends AbstractMultivalueFunctionTestCase {
-    public MvCountTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public MvCountTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeTests.java
index b04a58d9fb07d..713ae263705d3 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvDedupeTests.java
@@ -10,6 +10,7 @@
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.Source;
 import org.elasticsearch.xpack.ql.type.DataType;
@@ -28,7 +29,7 @@
 import static org.hamcrest.Matchers.nullValue;
 
 public class MvDedupeTests extends AbstractMultivalueFunctionTestCase {
-    public MvDedupeTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public MvDedupeTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxTests.java
index 30ecf8981f7e1..556cedf259a86 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMaxTests.java
@@ -10,6 +10,7 @@
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.Source;
 import org.elasticsearch.xpack.ql.type.DataType;
@@ -24,7 +25,7 @@
 import static org.hamcrest.Matchers.equalTo;
 
 public class MvMaxTests extends AbstractMultivalueFunctionTestCase {
-    public MvMaxTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public MvMaxTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianTests.java
index 954e931eab77e..047dc4fe64641 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMedianTests.java
@@ -10,6 +10,7 @@
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.Source;
 import org.elasticsearch.xpack.ql.type.DataType;
@@ -24,7 +25,7 @@
 import static org.hamcrest.Matchers.equalTo;
 
 public class MvMedianTests extends AbstractMultivalueFunctionTestCase {
-    public MvMedianTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public MvMedianTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
@@ -72,8 +73,8 @@ public static Iterable<Object[]> parameters() {
         cases.add(
             new TestCaseSupplier(
                 "mv_median(<1, 2>)",
-                () -> new TestCase(
-                    List.of(new TypedData(List.of(1, 2), DataTypes.INTEGER, "field")),
+                () -> new TestCaseSupplier.TestCase(
+                    List.of(new TestCaseSupplier.TypedData(List.of(1, 2), DataTypes.INTEGER, "field")),
                     "MvMedian[field=Attribute[channel=0]]",
                     DataTypes.INTEGER,
                     equalTo(1)
@@ -83,8 +84,8 @@ public static Iterable<Object[]> parameters() {
         cases.add(
             new TestCaseSupplier(
                 "mv_median(<-1, -2>)",
-                () -> new TestCase(
-                    List.of(new TypedData(List.of(-1, -2), DataTypes.INTEGER, "field")),
+                () -> new TestCaseSupplier.TestCase(
+                    List.of(new TestCaseSupplier.TypedData(List.of(-1, -2), DataTypes.INTEGER, "field")),
                     "MvMedian[field=Attribute[channel=0]]",
                     DataTypes.INTEGER,
                     equalTo(-2)
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinTests.java
index 4c4991a78a569..c1dd713e6639c 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvMinTests.java
@@ -10,6 +10,7 @@
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.Source;
 import org.elasticsearch.xpack.ql.type.DataType;
@@ -24,7 +25,7 @@
 import static org.hamcrest.Matchers.equalTo;
 
 public class MvMinTests extends AbstractMultivalueFunctionTestCase {
-    public MvMinTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public MvMinTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumTests.java
index cfa505fd6a18b..da6a7aec8462c 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/multivalue/MvSumTests.java
@@ -10,6 +10,7 @@
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.Source;
 import org.elasticsearch.xpack.ql.type.DataType;
@@ -21,7 +22,7 @@
 import static org.hamcrest.Matchers.equalTo;
 
 public class MvSumTests extends AbstractMultivalueFunctionTestCase {
-    public MvSumTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public MvSumTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java
index 3da4c26e43660..cba09907f35b9 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/CoalesceTests.java
@@ -13,6 +13,7 @@
 import org.elasticsearch.compute.data.Block;
 import org.elasticsearch.xpack.esql.evaluator.EvalMapper;
 import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.esql.expression.function.scalar.VaragsTestCaseBuilder;
 import org.elasticsearch.xpack.esql.planner.Layout;
 import org.elasticsearch.xpack.ql.expression.Expression;
@@ -31,7 +32,7 @@
 import static org.hamcrest.Matchers.equalTo;
 
 public class CoalesceTests extends AbstractFunctionTestCase {
-    public CoalesceTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public CoalesceTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullTests.java
index 12e1d8a680052..b3e7712484875 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNotNullTests.java
@@ -13,6 +13,7 @@
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.compute.data.Block;
 import org.elasticsearch.compute.data.BooleanBlock;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
 import org.elasticsearch.xpack.ql.expression.Expression;
@@ -28,15 +29,15 @@
 import static org.hamcrest.Matchers.equalTo;
 
 public class IsNotNullTests extends AbstractScalarFunctionTestCase {
-    public IsNotNullTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public IsNotNullTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
     @ParametersFactory
     public static Iterable<Object[]> parameters() {
         return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Keyword Not Null", () -> {
-            return new TestCase(
-                List.of(new TypedData(new BytesRef("cat"), DataTypes.KEYWORD, "exp")),
+            return new TestCaseSupplier.TestCase(
+                List.of(new TestCaseSupplier.TypedData(new BytesRef("cat"), DataTypes.KEYWORD, "exp")),
                 "IsNotNullEvaluator[field=Attribute[channel=0]]",
                 DataTypes.BOOLEAN,
                 equalTo(true)
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullTests.java
index 799471a799cf9..1818e703828af 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/nulls/IsNullTests.java
@@ -13,6 +13,7 @@
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.compute.data.Block;
 import org.elasticsearch.compute.data.BooleanBlock;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase;
 import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
 import org.elasticsearch.xpack.ql.expression.Expression;
@@ -28,15 +29,15 @@
 import static org.hamcrest.Matchers.equalTo;
 
 public class IsNullTests extends AbstractScalarFunctionTestCase {
-    public IsNullTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public IsNullTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
     @ParametersFactory
     public static Iterable<Object[]> parameters() {
         return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Keyword is Null", () -> {
-            return new TestCase(
-                List.of(new TypedData(new BytesRef("cat"), DataTypes.KEYWORD, "exp")),
+            return new TestCaseSupplier.TestCase(
+                List.of(new TestCaseSupplier.TypedData(new BytesRef("cat"), DataTypes.KEYWORD, "exp")),
                 "IsNullEvaluator[field=Attribute[channel=0]]",
                 DataTypes.BOOLEAN,
                 equalTo(false)
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java
index c5f7448edd6f6..fdb9387b410ff 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/AbstractTrimTests.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.xpack.esql.expression.function.scalar.string;
 
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase;
 import org.elasticsearch.xpack.ql.type.DataType;
 
@@ -69,9 +70,9 @@ static Iterable<Object[]> parameters(String name, boolean trimLeading, boolean trimTrailing) {
         return parameterSuppliersFromTypedData(suppliers);
     }
 
-    private static TestCase testCase(String name, DataType type, String data, String expected) {
-        return new TestCase(
-            List.of(new TypedData(new BytesRef(data), type, "str")),
+    private static TestCaseSupplier.TestCase testCase(String name, DataType type, String data, String expected) {
+        return new TestCaseSupplier.TestCase(
+            List.of(new TestCaseSupplier.TypedData(new BytesRef(data), type, "str")),
             name + "[val=Attribute[channel=0]]",
             type,
             equalTo(new BytesRef(expected))
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java
index cb40427bdc48a..2b10c2c4f806e 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/ConcatTests.java
@@ -11,6 +11,7 @@
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
 import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.expression.Literal;
@@ -29,7 +30,7 @@
 import static org.hamcrest.Matchers.equalTo;
 
 public class ConcatTests extends AbstractScalarFunctionTestCase {
-    public ConcatTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public ConcatTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
@@ -38,8 +39,11 @@ public static Iterable<Object[]> parameters() {
         return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("concat basic test", () -> {
             BytesRef first = new BytesRef(randomAlphaOfLength(3));
             BytesRef second = new BytesRef(randomAlphaOfLength(3));
-            return new TestCase(
-                List.of(new TypedData(first, DataTypes.KEYWORD, "first"), new TypedData(second, DataTypes.KEYWORD, "second")),
+            return new TestCaseSupplier.TestCase(
+                List.of(
+                    new TestCaseSupplier.TypedData(first, DataTypes.KEYWORD, "first"),
+                    new TestCaseSupplier.TypedData(second, DataTypes.KEYWORD, "second")
+                ),
                 "ConcatEvaluator[values=[Attribute[channel=0], Attribute[channel=1]]]",
                 DataTypes.KEYWORD,
                 equalTo(new BytesRef(first.utf8ToString() + second.utf8ToString()))
@@ -52,7 +56,7 @@ protected DataType expectedType(List<DataType> argTypes) {
         return DataTypes.KEYWORD;
     }
 
-    private Matcher<Object> resultsMatcher(List<TypedData> simpleData) {
+    private Matcher<Object> resultsMatcher(List<TestCaseSupplier.TypedData> simpleData) {
         return equalTo(new BytesRef(simpleData.stream().map(o -> ((BytesRef) o.data()).utf8ToString()).collect(Collectors.joining())));
     }
 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrimTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrimTests.java
index 74ae9a09244d5..23171545dc693 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrimTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LTrimTests.java
@@ -10,6 +10,7 @@
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.Source;
 
@@ -17,7 +18,7 @@
 import java.util.function.Supplier;
 
 public class LTrimTests extends AbstractTrimTests {
-    public LTrimTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public LTrimTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
    }
 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java
new file mode 100644
index 0000000000000..eb68509fdfafa
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LeftTests.java
@@ -0,0 +1,208 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.string;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
+import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.expression.Literal;
+import org.elasticsearch.xpack.ql.tree.Source;
+import org.elasticsearch.xpack.ql.type.DataType;
+import org.elasticsearch.xpack.ql.type.DataTypes;
+import org.hamcrest.Matcher;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Supplier;
+
+import static org.elasticsearch.compute.data.BlockUtils.toJavaObject;
+import static org.hamcrest.Matchers.equalTo;
+
+public class LeftTests extends AbstractScalarFunctionTestCase {
+    public LeftTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
+        this.testCase = testCaseSupplier.get();
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() {
+        List<TestCaseSupplier> suppliers = new ArrayList<>();
+
+        suppliers.add(new TestCaseSupplier("empty string", () -> {
+            int length = between(-64, 64);
+            return new TestCaseSupplier.TestCase(
+                List.of(
+                    new TestCaseSupplier.TypedData(new BytesRef(""), DataTypes.KEYWORD, "str"),
+                    new TestCaseSupplier.TypedData(length, DataTypes.INTEGER, "length")
+                ),
+                "LeftEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]",
+                DataTypes.KEYWORD,
+                equalTo(new BytesRef(""))
+            );
+        }));
+
+        suppliers.add(new TestCaseSupplier("ascii", () -> {
+            String text = randomAlphaOfLengthBetween(1, 64);
+            int length = between(1, text.length());
+            return new TestCaseSupplier.TestCase(
+                List.of(
+                    new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"),
+                    new TestCaseSupplier.TypedData(length, DataTypes.INTEGER, "length")
+                ),
+                "LeftEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]",
+                DataTypes.KEYWORD,
+                equalTo(new BytesRef(unicodeLeftSubstring(text, length)))
+            );
+        }));
+        suppliers.add(new TestCaseSupplier("ascii longer than string", () -> {
+            String text = randomAlphaOfLengthBetween(1, 64);
+            int length = between(text.length(), 128);
+            return new TestCaseSupplier.TestCase(
+                List.of(
+                    new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"),
+                    new TestCaseSupplier.TypedData(length, DataTypes.INTEGER, "length")
+                ),
+                "LeftEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]",
+                DataTypes.KEYWORD,
+                equalTo(new BytesRef(text))
+            );
+        }));
+        suppliers.add(new TestCaseSupplier("ascii zero length", () -> {
+            String text = randomAlphaOfLengthBetween(1, 64);
+            return new TestCaseSupplier.TestCase(
+                List.of(
+                    new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"),
+                    new TestCaseSupplier.TypedData(0, DataTypes.INTEGER, "length")
+                ),
+                "LeftEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]",
+                DataTypes.KEYWORD,
+                equalTo(new BytesRef(""))
+            );
+        }));
+        suppliers.add(new TestCaseSupplier("ascii negative length", () -> {
+            String text = randomAlphaOfLengthBetween(1, 64);
+            int length = between(-128, -1);
+            return new TestCaseSupplier.TestCase(
+                List.of(
+                    new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"),
+                    new TestCaseSupplier.TypedData(length, DataTypes.INTEGER, "length")
+                ),
+                "LeftEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]",
+                DataTypes.KEYWORD,
+                equalTo(new BytesRef(""))
+            );
+        }));
+
+        suppliers.add(new TestCaseSupplier("unicode", () -> {
+            String text = randomUnicodeOfLengthBetween(1, 64);
+            int length = between(1, text.length());
+            return new TestCaseSupplier.TestCase(
+                List.of(
+                    new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"),
+                    new TestCaseSupplier.TypedData(length, DataTypes.INTEGER, "length")
+                ),
+                "LeftEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]",
+                DataTypes.KEYWORD,
+                equalTo(new BytesRef(unicodeLeftSubstring(text, length)))
+            );
+        }));
+        suppliers.add(new TestCaseSupplier("unicode longer than string", () -> {
+            String text = randomUnicodeOfLengthBetween(1, 64);
+            int length = between(text.length(), 128);
+            return new TestCaseSupplier.TestCase(
+                List.of(
+                    new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"),
+                    new TestCaseSupplier.TypedData(length, DataTypes.INTEGER, "length")
+                ),
+                "LeftEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]",
+                DataTypes.KEYWORD,
+                equalTo(new BytesRef(text))
+            );
+        }));
+        suppliers.add(new TestCaseSupplier("unicode zero length", () -> {
+            String text = randomUnicodeOfLengthBetween(1, 64);
+            return new TestCaseSupplier.TestCase(
+                List.of(
+                    new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"),
+                    new TestCaseSupplier.TypedData(0, DataTypes.INTEGER, "length")
+                ),
+                "LeftEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]",
+                DataTypes.KEYWORD,
+                equalTo(new BytesRef(""))
+            );
+        }));
+        suppliers.add(new TestCaseSupplier("unicode negative length", () -> {
+            String text = randomUnicodeOfLengthBetween(1, 64);
+            int length = between(-128, -1);
+            return new TestCaseSupplier.TestCase(
+                List.of(
+                    new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"),
+                    new TestCaseSupplier.TypedData(length, DataTypes.INTEGER, "length")
+                ),
+                "LeftEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]",
+                DataTypes.KEYWORD,
+                equalTo(new BytesRef(""))
+            );
+        }));
+
+        return parameterSuppliersFromTypedData(suppliers);
+    }
+
+    private static String unicodeLeftSubstring(String str, int length) {
+        if (length < 0) {
+            return "";
+        } else {
+            return str.codePoints()
+                .limit(length)
+                .collect(StringBuilder::new, StringBuilder::appendCodePoint, StringBuilder::append)
+                .toString();
+        }
+    }
+
+    @Override
+    protected Expression build(Source source, List<Expression> args) {
+        return new Left(source, args.get(0), args.get(1));
+    }
+
+    @Override
+    protected List<ArgumentSpec> argSpec() {
+        return List.of(required(strings()), required(integers()));
+    }
+
+    @Override
+    protected DataType expectedType(List<DataType> argTypes) {
+        return DataTypes.KEYWORD;
+    }
+
+    public Matcher<Object> resultsMatcher(List<TestCaseSupplier.TypedData> typedData) {
+        String str = ((BytesRef) typedData.get(0).data()).utf8ToString();
+        int length = (Integer) typedData.get(1).data();
+        return equalTo(new BytesRef(str.substring(0, length)));
+    }
+
+    public void testUnicode() {
+        final String s = "a\ud83c\udf09tiger";
+        assert s.codePointCount(0, s.length()) == 7;
+        assertThat(process(s, 2), equalTo("a\ud83c\udf09"));
+    }
+
+    private String process(String str, int length) {
+        Block result = evaluator(
+            new Left(Source.EMPTY, field("str", DataTypes.KEYWORD), new Literal(Source.EMPTY, length, DataTypes.INTEGER))
+        ).get().eval(row(List.of(new BytesRef(str))));
+        if (null == result) {
+            return null;
+        }
+        BytesRef resultByteRef = ((BytesRef) toJavaObject(result, 0));
+        return resultByteRef == null ? null : resultByteRef.utf8ToString();
+    }
+}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java
index eac3fbeb2149a..c6eb2d1f2a2c0 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/LengthTests.java
@@ -12,6 +12,7 @@
 
 import org.apache.lucene.util.BytesRef;
 import org.apache.lucene.util.UnicodeUtil;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.Source;
@@ -25,7 +26,7 @@
 import static org.hamcrest.Matchers.equalTo;
 
 public class LengthTests extends AbstractScalarFunctionTestCase {
-    public LengthTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public LengthTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
@@ -33,8 +34,8 @@ public LengthTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
     public static Iterable<Object[]> parameters() {
         return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("length basic test", () -> {
             BytesRef value = new BytesRef(randomAlphaOfLength(between(0, 10000)));
-            return new TestCase(
-                List.of(new TypedData(value, DataTypes.KEYWORD, "f")),
+            return new TestCaseSupplier.TestCase(
+                List.of(new TestCaseSupplier.TypedData(value, DataTypes.KEYWORD, "f")),
                 "LengthEvaluator[val=Attribute[channel=0]]",
                 DataTypes.INTEGER,
                 equalTo(UnicodeUtil.codePointCount(value))
@@ -50,9 +51,9 @@ public static Iterable<Object[]> parameters() {
         ));
     }
 
-    private static TestCase makeTestCase(String text, int expectedLength) {
-        return new TestCase(
-            List.of(new TypedData(new BytesRef(text), DataTypes.KEYWORD, "f")),
+    private static TestCaseSupplier.TestCase makeTestCase(String text, int expectedLength) {
+        return new TestCaseSupplier.TestCase(
+            List.of(new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.KEYWORD, "f")),
             "LengthEvaluator[val=Attribute[channel=0]]",
             DataTypes.INTEGER,
             equalTo(expectedLength)
@@ -64,7 +65,7 @@ protected DataType expectedType(List<DataType> argTypes) {
         return DataTypes.INTEGER;
     }
 
-    private Matcher<Object> resultsMatcher(List<TypedData> typedData) {
+    private Matcher<Object> resultsMatcher(List<TestCaseSupplier.TypedData> typedData) {
         return equalTo(UnicodeUtil.codePointCount((BytesRef) typedData.get(0).data()));
     }
 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrimTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrimTests.java
index a6017f5162e7e..151612bb9c569 100644
--- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrimTests.java
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RTrimTests.java
@@ -10,6 +10,7 @@
 import com.carrotsearch.randomizedtesting.annotations.Name;
 import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
 
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
 import org.elasticsearch.xpack.ql.expression.Expression;
 import org.elasticsearch.xpack.ql.tree.Source;
 
@@ -17,7 +18,7 @@
 import java.util.function.Supplier;
 
 public class RTrimTests extends AbstractTrimTests {
-    public RTrimTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) {
+    public RTrimTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
         this.testCase = testCaseSupplier.get();
     }
 
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java
new file mode 100644
index 0000000000000..540051d9ac8b8
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/RightTests.java
@@ -0,0 +1,210 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.function.scalar.string;
+
+import com.carrotsearch.randomizedtesting.annotations.Name;
+import com.carrotsearch.randomizedtesting.annotations.ParametersFactory;
+
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.compute.data.Block;
+import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier;
+import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase;
+import org.elasticsearch.xpack.ql.expression.Expression;
+import org.elasticsearch.xpack.ql.expression.Literal;
+import org.elasticsearch.xpack.ql.tree.Source;
+import org.elasticsearch.xpack.ql.type.DataType;
+import org.elasticsearch.xpack.ql.type.DataTypes;
+import org.hamcrest.Matcher;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.Supplier;
+
+import static org.elasticsearch.compute.data.BlockUtils.toJavaObject;
+import static org.hamcrest.Matchers.equalTo;
+
+public class RightTests extends AbstractScalarFunctionTestCase {
+    public RightTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) {
+        this.testCase = testCaseSupplier.get();
+    }
+
+    @ParametersFactory
+    public static Iterable<Object[]> parameters() {
+        List<TestCaseSupplier> suppliers = new ArrayList<>();
+
+        suppliers.add(new TestCaseSupplier("empty string", () -> {
+            int length = between(-64, 64);
+            return new TestCaseSupplier.TestCase(
+                List.of(
+                    new TestCaseSupplier.TypedData(new BytesRef(""), DataTypes.KEYWORD, "str"),
+                    new TestCaseSupplier.TypedData(length, DataTypes.INTEGER, "length")
+                ),
+                "RightEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]",
+                DataTypes.KEYWORD,
+                equalTo(new BytesRef(""))
+            );
+        }));
+
+        suppliers.add(new TestCaseSupplier("ascii", () -> {
+            String text = randomAlphaOfLengthBetween(1, 64);
+            int length = between(1, text.length());
+            return new TestCaseSupplier.TestCase(
+                List.of(
+                    new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"),
+                    new TestCaseSupplier.TypedData(length, DataTypes.INTEGER, "length")
+                ),
+                "RightEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]",
+                DataTypes.KEYWORD,
+                equalTo(new BytesRef(unicodeRightSubstring(text, length)))
+            );
+        }));
+        suppliers.add(new TestCaseSupplier("ascii longer than string", () -> {
+            String text = randomAlphaOfLengthBetween(1, 64);
+            int length = between(text.length(), 128);
+            return new TestCaseSupplier.TestCase(
+                List.of(
+                    new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"),
+                    new TestCaseSupplier.TypedData(length, DataTypes.INTEGER, "length")
+                ),
+
"RightEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]", + DataTypes.KEYWORD, + equalTo(new BytesRef(text)) + ); + })); + suppliers.add(new TestCaseSupplier("ascii zero length", () -> { + String text = randomAlphaOfLengthBetween(1, 64); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"), + new TestCaseSupplier.TypedData(0, DataTypes.INTEGER, "length") + ), + "RightEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]", + DataTypes.KEYWORD, + equalTo(new BytesRef("")) + ); + })); + suppliers.add(new TestCaseSupplier("ascii negative length", () -> { + String text = randomAlphaOfLengthBetween(1, 64); + int length = between(-128, -1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"), + new TestCaseSupplier.TypedData(length, DataTypes.INTEGER, "length") + ), + "RightEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]", + DataTypes.KEYWORD, + equalTo(new BytesRef("")) + ); + })); + + suppliers.add(new TestCaseSupplier("unicode", () -> { + String text = randomUnicodeOfLengthBetween(1, 64); + int length = between(1, text.length()); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"), + new TestCaseSupplier.TypedData(length, DataTypes.INTEGER, "length") + ), + "RightEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]", + DataTypes.KEYWORD, + equalTo(new BytesRef(unicodeRightSubstring(text, length))) + ); + })); + suppliers.add(new TestCaseSupplier("unicode longer than string", () -> { + String text = randomUnicodeOfLengthBetween(1, 64); + int length = between(text.length(), 128); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"), + new TestCaseSupplier.TypedData(length, DataTypes.INTEGER, "length") + ), + "RightEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]", + DataTypes.KEYWORD, + equalTo(new BytesRef(text)) + ); + })); + suppliers.add(new TestCaseSupplier("unicode zero length", () -> { + String text = randomUnicodeOfLengthBetween(1, 64); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"), + new TestCaseSupplier.TypedData(0, DataTypes.INTEGER, "length") + ), + "RightEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]", + DataTypes.KEYWORD, + equalTo(new BytesRef("")) + ); + })); + suppliers.add(new TestCaseSupplier("unicode negative length", () -> { + String text = randomUnicodeOfLengthBetween(1, 64); + int length = between(-128, -1); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"), + new TestCaseSupplier.TypedData(length, DataTypes.INTEGER, "length") + ), + "RightEvaluator[str=Attribute[channel=0], length=Attribute[channel=1]]", + DataTypes.KEYWORD, + equalTo(new BytesRef("")) + ); + })); + + return parameterSuppliersFromTypedData(suppliers); + } + + private static String unicodeRightSubstring(String str, int length) { + int codepointCount = str.codePointCount(0, str.length()); + int codePointsToSkip = codepointCount - length; + if (codePointsToSkip < 0) { + return str; + } else { + return str.codePoints() + .skip(codePointsToSkip) + .collect(StringBuilder::new, StringBuilder::appendCodePoint, StringBuilder::append) + .toString(); + } + } + + 
@Override + protected Expression build(Source source, List args) { + return new Right(source, args.get(0), args.get(1)); + } + + @Override + protected List argSpec() { + return List.of(required(strings()), required(integers())); + } + + @Override + protected DataType expectedType(List argTypes) { + return DataTypes.KEYWORD; + } + + public Matcher resultsMatcher(List typedData) { + String str = ((BytesRef) typedData.get(0).data()).utf8ToString(); + int length = (Integer) typedData.get(1).data(); + return equalTo(new BytesRef(str.substring(str.length() - length))); + } + + public void testUnicode() { + final String s = "a\ud83c\udf09tiger"; + assert s.codePointCount(0, s.length()) == 7; + assertThat(process(s, 6), equalTo("\ud83c\udf09tiger")); + } + + private String process(String str, int length) { + Block result = evaluator( + new Right(Source.EMPTY, field("str", DataTypes.KEYWORD), new Literal(Source.EMPTY, length, DataTypes.INTEGER)) + ).get().eval(row(List.of(new BytesRef(str)))); + if (null == result) { + return null; + } + BytesRef resultByteRef = ((BytesRef) toJavaObject(result, 0)); + return resultByteRef == null ? null : resultByteRef.utf8ToString(); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java index 3c2f996993b11..aba167759d32a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SplitTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.Page; import org.elasticsearch.compute.operator.EvalOperator; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Literal; @@ -34,7 +35,7 @@ import static org.hamcrest.Matchers.equalTo; public class SplitTests extends AbstractScalarFunctionTestCase { - public SplitTests(@Name("TestCase") Supplier testCaseSupplier) { + public SplitTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -47,10 +48,10 @@ public static Iterable parameters() { .map(BytesRef::new) .collect(Collectors.toList()); String str = strings.stream().map(BytesRef::utf8ToString).collect(joining(delimiter)); - return new TestCase( + return new TestCaseSupplier.TestCase( List.of( - new TypedData(new BytesRef(str), DataTypes.KEYWORD, "str"), - new TypedData(new BytesRef(delimiter), DataTypes.KEYWORD, "delim") + new TestCaseSupplier.TypedData(new BytesRef(str), DataTypes.KEYWORD, "str"), + new TestCaseSupplier.TypedData(new BytesRef(delimiter), DataTypes.KEYWORD, "delim") ), "SplitVariableEvaluator[str=Attribute[channel=0], delim=Attribute[channel=1]]", DataTypes.KEYWORD, @@ -64,7 +65,7 @@ protected DataType expectedType(List argTypes) { return DataTypes.KEYWORD; } - private Matcher resultsMatcher(List typedData) { + private Matcher resultsMatcher(List typedData) { String str = ((BytesRef) typedData.get(0).data()).utf8ToString(); String delim = ((BytesRef) typedData.get(1).data()).utf8ToString(); List split = Arrays.stream(str.split(Pattern.quote(delim))).map(BytesRef::new).toList(); diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithTests.java index 20221436d0660..6eacea1d02987 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/StartsWithTests.java @@ -11,6 +11,7 @@ import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; @@ -24,7 +25,7 @@ import static org.hamcrest.Matchers.equalTo; public class StartsWithTests extends AbstractScalarFunctionTestCase { - public StartsWithTests(@Name("TestCase") Supplier testCaseSupplier) { + public StartsWithTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -36,10 +37,10 @@ public static Iterable parameters() { if (randomBoolean()) { str = prefix + str; } - return new TestCase( + return new TestCaseSupplier.TestCase( List.of( - new TypedData(new BytesRef(str), DataTypes.KEYWORD, "str"), - new TypedData(new BytesRef(prefix), DataTypes.KEYWORD, "prefix") + new TestCaseSupplier.TypedData(new BytesRef(str), DataTypes.KEYWORD, "str"), + new TestCaseSupplier.TypedData(new BytesRef(prefix), DataTypes.KEYWORD, "prefix") ), "StartsWithEvaluator[str=Attribute[channel=0], prefix=Attribute[channel=1]]", DataTypes.BOOLEAN, @@ -53,7 +54,7 @@ protected DataType expectedType(List argTypes) { return DataTypes.BOOLEAN; } - private Matcher resultsMatcher(List typedData) { + private Matcher resultsMatcher(List typedData) { String str = ((BytesRef) typedData.get(0).data()).utf8ToString(); String prefix = ((BytesRef) typedData.get(1).data()).utf8ToString(); return equalTo(str.startsWith(prefix)); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java index 83974a232e49c..5742b97f8f3ff 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/SubstringTests.java @@ -12,6 +12,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.compute.data.Block; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.Literal; @@ -28,7 +29,7 @@ import static org.hamcrest.Matchers.equalTo; public class SubstringTests extends AbstractScalarFunctionTestCase { - public SubstringTests(@Name("TestCase") Supplier testCaseSupplier) { + public SubstringTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -38,11 +39,11 @@ public static Iterable parameters() { int start = between(1, 8); int length = between(1, 10 - start); String 
text = randomAlphaOfLength(10); - return new TestCase( + return new TestCaseSupplier.TestCase( List.of( - new TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"), - new TypedData(start, DataTypes.INTEGER, "start"), - new TypedData(length, DataTypes.INTEGER, "end") + new TestCaseSupplier.TypedData(new BytesRef(text), DataTypes.KEYWORD, "str"), + new TestCaseSupplier.TypedData(start, DataTypes.INTEGER, "start"), + new TestCaseSupplier.TypedData(length, DataTypes.INTEGER, "end") ), "SubstringEvaluator[str=Attribute[channel=0], start=Attribute[channel=1], length=Attribute[channel=2]]", DataTypes.KEYWORD, @@ -56,7 +57,7 @@ protected DataType expectedType(List argTypes) { return DataTypes.KEYWORD; } - public Matcher resultsMatcher(List typedData) { + public Matcher resultsMatcher(List typedData) { String str = ((BytesRef) typedData.get(0).data()).utf8ToString(); int start = (Integer) typedData.get(1).data(); int end = (Integer) typedData.get(2).data(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/TrimTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/TrimTests.java index 422d4da9dd121..631e0f0242eb2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/TrimTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/function/scalar/string/TrimTests.java @@ -10,6 +10,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; @@ -17,7 +18,7 @@ import java.util.function.Supplier; public class TrimTests extends AbstractTrimTests { - public TrimTests(@Name("TestCase") Supplier testCaseSupplier) { + public TrimTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java index 1415861cb481c..8a4f8963cba96 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/AbstractBinaryOperatorTestCase.java @@ -9,6 +9,7 @@ import org.elasticsearch.xpack.esql.analysis.Verifier; import org.elasticsearch.xpack.esql.expression.function.AbstractFunctionTestCase; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.common.Failure; import org.elasticsearch.xpack.ql.expression.Expression; @@ -24,14 +25,16 @@ import java.util.Locale; import static org.elasticsearch.compute.data.BlockUtils.toJavaObject; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isRepresentable; import static org.elasticsearch.xpack.ql.type.DataTypeConverter.commonType; +import static org.elasticsearch.xpack.ql.type.DataTypes.isNull; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; public abstract class 
AbstractBinaryOperatorTestCase extends AbstractFunctionTestCase {
-    protected abstract Matcher<Object> resultsMatcher(List<TypedData> typedData);
+    protected abstract Matcher<Object> resultsMatcher(List<TestCaseSupplier.TypedData> typedData);
 
     /**
      * Return a {@link Matcher} to validate the results of evaluating the function
      */
@@ -52,37 +55,48 @@ protected Expression build(Source source, List<Expression> args) {
 
     protected abstract BinaryOperator<?, ?, ?, ?> build(Source source, Expression lhs, Expression rhs);
 
+    /**
+     * Whether the given type is acceptable for any of the function parameters.
+     * @param type The type to probe.
+     * @return True if the type is supported by the respective function.
+     */
     protected abstract boolean supportsType(DataType type);
 
+    /**
+     * Whether the given combination of parameter types is acceptable to the function.
+     * @param lhsType Left argument type.
+     * @param rhsType Right argument type.
+     * @return True if the type combination is supported by the respective function.
+     */
+    protected boolean supportsTypes(DataType lhsType, DataType rhsType) {
+        if (isNull(lhsType) || isNull(rhsType)) {
+            return false;
+        }
+        if ((lhsType == DataTypes.UNSIGNED_LONG || rhsType == DataTypes.UNSIGNED_LONG) && lhsType != rhsType) {
+            // UL can only be operated on together with another UL, so skip non-UL&UL combinations
+            return false;
+        }
+        return supportsType(lhsType) && supportsType(rhsType);
+    }
+
     public final void testApplyToAllTypes() {
         for (DataType lhsType : EsqlDataTypes.types()) {
-            if (EsqlDataTypes.isRepresentable(lhsType) == false || lhsType == DataTypes.NULL) {
-                continue;
-            }
-            if (supportsType(lhsType) == false) {
-                continue;
-            }
-            Literal lhs = randomLiteral(lhsType);
             for (DataType rhsType : EsqlDataTypes.types()) {
-                if (EsqlDataTypes.isRepresentable(rhsType) == false || rhsType == DataTypes.NULL) {
-                    continue;
-                }
-                if (supportsType(rhsType) == false) {
-                    continue;
-                }
-                if (false == (lhsType == rhsType || lhsType.isNumeric() && rhsType.isNumeric())) {
-                    continue;
-                }
-                if (lhsType != rhsType && (lhsType == DataTypes.UNSIGNED_LONG || rhsType == DataTypes.UNSIGNED_LONG)) {
+                if (supportsTypes(lhsType, rhsType) == false) {
                     continue;
                 }
+                Literal lhs = randomLiteral(lhsType);
                 Literal rhs = randomValueOtherThanMany(l -> rhsOk(l.value()) == false, () -> randomLiteral(rhsType));
-                BinaryOperator<?, ?, ?, ?> op = build(
-                    new Source(Location.EMPTY, lhsType.typeName() + " " + rhsType.typeName()),
-                    field("lhs", lhsType),
-                    field("rhs", rhsType)
-                );
-                Object result = toJavaObject(evaluator(op).get().eval(row(List.of(lhs.value(), rhs.value()))), 0);
+                Object result;
+                BinaryOperator<?, ?, ?, ?> op;
+                Source src = new Source(Location.EMPTY, lhsType.typeName() + " " + rhsType.typeName());
+                if (isRepresentable(lhsType) && isRepresentable(rhsType)) {
+                    op = build(src, field("lhs", lhsType), field("rhs", rhsType));
+                    result = toJavaObject(evaluator(op).get().eval(row(List.of(lhs.value(), rhs.value()))), 0);
+                } else {
+                    op = build(src, lhs, rhs);
+                    result = op.fold();
+                }
                 if (result == null) {
                     assertCriticalWarnings(
                         "Line -1:-1: evaluation of [" + op + "] failed, treating result as null.
Only first 20 failures recorded.", @@ -100,12 +114,12 @@ public final void testApplyToAllTypes() { public final void testResolveType() { for (DataType lhsType : EsqlDataTypes.types()) { - if (EsqlDataTypes.isRepresentable(lhsType) == false) { + if (isRepresentable(lhsType) == false) { continue; } Literal lhs = randomLiteral(lhsType); for (DataType rhsType : EsqlDataTypes.types()) { - if (EsqlDataTypes.isRepresentable(rhsType) == false) { + if (isRepresentable(rhsType) == false) { continue; } Literal rhs = randomLiteral(rhsType); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractArithmeticTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractArithmeticTestCase.java index 9c084cee44484..49233a19114c8 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractArithmeticTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractArithmeticTestCase.java @@ -7,7 +7,9 @@ package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.predicate.operator.AbstractBinaryOperatorTestCase; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.predicate.BinaryOperator; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; @@ -21,7 +23,7 @@ import static org.hamcrest.Matchers.is; public abstract class AbstractArithmeticTestCase extends AbstractBinaryOperatorTestCase { - protected final Matcher resultMatcher(List data, DataType dataType) { + protected Matcher resultMatcher(List data, DataType dataType) { Number lhs = (Number) data.get(0); Number rhs = (Number) data.get(1); if (lhs instanceof Double || rhs instanceof Double) { @@ -40,7 +42,7 @@ protected final Matcher resultMatcher(List data, DataType dataTy } @Override - protected Matcher resultsMatcher(List typedData) { + protected Matcher resultsMatcher(List typedData) { Number lhs = (Number) typedData.get(0).data(); Number rhs = (Number) typedData.get(1).data(); if (typedData.stream().anyMatch(t -> t.type().equals(DataTypes.DOUBLE))) { @@ -67,12 +69,12 @@ protected Matcher resultsMatcher(List typedData) { protected abstract long expectedUnsignedLongValue(long lhs, long rhs); @Override - protected final boolean supportsType(DataType type) { - return type.isNumeric(); + protected boolean supportsType(DataType type) { + return type.isNumeric() && EsqlDataTypes.isRepresentable(type); } @Override - protected final void validateType(BinaryOperator op, DataType lhsType, DataType rhsType) { + protected void validateType(BinaryOperator op, DataType lhsType, DataType rhsType) { if (DataTypes.isNullOrNumeric(lhsType) && DataTypes.isNullOrNumeric(rhsType)) { assertTrue(op.toString(), op.typeResolved().resolved()); assertThat(op.toString(), op.dataType(), equalTo(expectedType(lhsType, rhsType))); @@ -97,7 +99,7 @@ protected final void validateType(BinaryOperator op, DataType lhsTyp ); } - private DataType expectedType(DataType lhsType, DataType rhsType) { + protected DataType expectedType(DataType lhsType, DataType rhsType) { if (lhsType == DataTypes.DOUBLE || rhsType == DataTypes.DOUBLE) { return DataTypes.DOUBLE; } diff --git 
a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractDateTimeArithmeticTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractDateTimeArithmeticTestCase.java
new file mode 100644
index 0000000000000..a620a95ea3c0f
--- /dev/null
+++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AbstractDateTimeArithmeticTestCase.java
@@ -0,0 +1,73 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License
+ * 2.0; you may not use this file except in compliance with the Elastic License
+ * 2.0.
+ */
+
+package org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic;
+
+import org.elasticsearch.xpack.esql.type.EsqlDataTypes;
+import org.elasticsearch.xpack.ql.expression.predicate.BinaryOperator;
+import org.elasticsearch.xpack.ql.type.DataType;
+import org.elasticsearch.xpack.ql.type.DataTypes;
+import org.hamcrest.Matcher;
+
+import java.time.temporal.TemporalAmount;
+import java.util.List;
+import java.util.Locale;
+
+import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isDateTimeOrTemporal;
+import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount;
+import static org.elasticsearch.xpack.ql.type.DataTypes.isDateTime;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.oneOf;
+
+public abstract class AbstractDateTimeArithmeticTestCase extends AbstractArithmeticTestCase {
+
+    @Override
+    protected Matcher<Object> resultMatcher(List<Object> data, DataType dataType) {
+        Object lhs = data.get(0);
+        Object rhs = data.get(1);
+        if (lhs instanceof TemporalAmount || rhs instanceof TemporalAmount) {
+            TemporalAmount temporal = lhs instanceof TemporalAmount leftTemporal ? leftTemporal : (TemporalAmount) rhs;
+            long datetime = temporal == lhs ? (Long) rhs : (Long) lhs;
+            return equalTo(expectedValue(datetime, temporal));
+        }
+        return super.resultMatcher(data, dataType);
+    }
+
+    protected abstract long expectedValue(long datetime, TemporalAmount temporalAmount);
+
+    @Override
+    protected final boolean supportsType(DataType type) {
+        return EsqlDataTypes.isDateTimeOrTemporal(type) || super.supportsType(type);
+    }
+
+    @Override
+    protected void validateType(BinaryOperator<?, ?, ?, ?> op, DataType lhsType, DataType rhsType) {
+        if (isDateTime(lhsType) && isTemporalAmount(rhsType) || isTemporalAmount(lhsType) && isDateTime(rhsType)) {
+            assertTrue(op.toString(), op.typeResolved().resolved());
+            assertTrue(op.toString(), isTemporalAmount(lhsType) || isTemporalAmount(rhsType));
+            assertFalse(op.toString(), isTemporalAmount(lhsType) && isTemporalAmount(rhsType));
+            assertThat(op.toString(), op.dataType(), equalTo(expectedType(lhsType, rhsType)));
+            assertThat(op.toString(), op.getClass(), oneOf(Add.class, Sub.class));
+        } else if (isDateTimeOrTemporal(lhsType) || isDateTimeOrTemporal(rhsType)) {
+            assertFalse(op.toString(), op.typeResolved().resolved());
+            assertThat(
+                op.toString(),
+                op.typeResolved().message(),
+                equalTo(
+                    String.format(Locale.ROOT, "[%s] has arguments with incompatible types [%s] and [%s]", op.symbol(), lhsType, rhsType)
+                )
+            );
+        } else {
+            super.validateType(op, lhsType, rhsType);
+        }
+    }
+
+    @Override
+    protected DataType expectedType(DataType lhsType, DataType rhsType) {
+        return isDateTimeOrTemporal(lhsType) ?
DataTypes.DATETIME : super.expectedType(lhsType, rhsType); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java index 8356e549e5c49..805eb3fd557c9 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/AddTests.java @@ -10,20 +10,31 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; import java.math.BigInteger; +import java.time.Duration; +import java.time.Period; +import java.time.temporal.TemporalAmount; import java.util.List; import java.util.function.Supplier; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isDateTimeOrTemporal; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount; +import static org.elasticsearch.xpack.ql.type.DataTypes.isDateTime; +import static org.elasticsearch.xpack.ql.type.DateUtils.asDateTime; +import static org.elasticsearch.xpack.ql.type.DateUtils.asMillis; import static org.elasticsearch.xpack.ql.util.NumericUtils.asLongUnsigned; import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsBigInteger; import static org.hamcrest.Matchers.equalTo; -public class AddTests extends AbstractArithmeticTestCase { - public AddTests(@Name("TestCase") Supplier testCaseSupplier) { +public class AddTests extends AbstractDateTimeArithmeticTestCase { + public AddTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -33,8 +44,11 @@ public static Iterable parameters() { // Ensure we don't have an overflow int rhs = randomIntBetween((Integer.MIN_VALUE >> 1) - 1, (Integer.MAX_VALUE >> 1) - 1); int lhs = randomIntBetween((Integer.MIN_VALUE >> 1) - 1, (Integer.MAX_VALUE >> 1) - 1); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.INTEGER, "lhs"), new TypedData(rhs, DataTypes.INTEGER, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.INTEGER, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.INTEGER, "rhs") + ), "AddIntsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.INTEGER, equalTo(lhs + rhs) @@ -43,8 +57,11 @@ public static Iterable parameters() { // Ensure we don't have an overflow long rhs = randomLongBetween((Long.MIN_VALUE >> 1) - 1, (Long.MAX_VALUE >> 1) - 1); long lhs = randomLongBetween((Long.MIN_VALUE >> 1) - 1, (Long.MAX_VALUE >> 1) - 1); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.LONG, "lhs"), new TypedData(rhs, DataTypes.LONG, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.LONG, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.LONG, "rhs") + ), "AddLongsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.LONG, equalTo(lhs + rhs) @@ -52,8 +69,11 @@ public static Iterable 
parameters() { }), new TestCaseSupplier("Double + Double", () -> { double rhs = randomDouble(); double lhs = randomDouble(); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.DOUBLE, "lhs"), new TypedData(rhs, DataTypes.DOUBLE, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.DOUBLE, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.DOUBLE, "rhs") + ), "AddDoublesEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.DOUBLE, equalTo(lhs + rhs) @@ -71,9 +91,63 @@ public static Iterable parameters() { "AddUnsignedLongsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", equalTo(asLongUnsigned(lhsBI.add(rhsBI).longValue())) ); - }) - */ - )); + }) */, new TestCaseSupplier("Datetime + Period", () -> { + long lhs = (Long) randomLiteral(DataTypes.DATETIME).value(); + Period rhs = (Period) randomLiteral(EsqlDataTypes.DATE_PERIOD).value(); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.DATETIME, "lhs"), + new TestCaseSupplier.TypedData(rhs, EsqlDataTypes.DATE_PERIOD, "rhs") + ), + "AddDatetimesEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", + DataTypes.DATETIME, + equalTo(asMillis(asDateTime(lhs).plus(rhs))) + ); + }), new TestCaseSupplier("Period + Datetime", () -> { + Period lhs = (Period) randomLiteral(EsqlDataTypes.DATE_PERIOD).value(); + long rhs = (Long) randomLiteral(DataTypes.DATETIME).value(); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, EsqlDataTypes.DATE_PERIOD, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.DATETIME, "rhs") + ), + "AddDatetimesEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", + DataTypes.DATETIME, + equalTo(asMillis(asDateTime(rhs).plus(lhs))) + ); + }), new TestCaseSupplier("Datetime + Duration", () -> { + long lhs = (Long) randomLiteral(DataTypes.DATETIME).value(); + Duration rhs = (Duration) randomLiteral(EsqlDataTypes.TIME_DURATION).value(); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.DATETIME, "lhs"), + new TestCaseSupplier.TypedData(rhs, EsqlDataTypes.TIME_DURATION, "rhs") + ), + "AddDatetimesEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", + DataTypes.DATETIME, + equalTo(asMillis(asDateTime(lhs).plus(rhs))) + ); + }), new TestCaseSupplier("Duration + Datetime", () -> { + long lhs = (Long) randomLiteral(DataTypes.DATETIME).value(); + Duration rhs = (Duration) randomLiteral(EsqlDataTypes.TIME_DURATION).value(); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.DATETIME, "lhs"), + new TestCaseSupplier.TypedData(rhs, EsqlDataTypes.TIME_DURATION, "rhs") + ), + "AddDatetimesEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", + DataTypes.DATETIME, + equalTo(asMillis(asDateTime(lhs).plus(rhs))) + ); + }))); + } + + @Override + protected boolean supportsTypes(DataType lhsType, DataType rhsType) { + if (isDateTimeOrTemporal(lhsType) || isDateTimeOrTemporal(rhsType)) { + return isDateTime(lhsType) && isTemporalAmount(rhsType) || isTemporalAmount(lhsType) && isDateTime(rhsType); + } + return super.supportsTypes(lhsType, rhsType); } @Override @@ -102,4 +176,9 @@ protected long expectedUnsignedLongValue(long lhs, long rhs) { BigInteger rhsBI = unsignedLongAsBigInteger(rhs); return asLongUnsigned(lhsBI.add(rhsBI).longValue()); } + + @Override + protected long expectedValue(long datetime, TemporalAmount 
temporalAmount) { + return asMillis(asDateTime(datetime).plus(temporalAmount)); + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivTests.java index c7026d768a968..82b6bbda276b6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/DivTests.java @@ -10,6 +10,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; @@ -23,7 +24,7 @@ import static org.hamcrest.Matchers.equalTo; public class DivTests extends AbstractArithmeticTestCase { - public DivTests(@Name("TestCase") Supplier testCaseSupplier) { + public DivTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -35,8 +36,11 @@ public static Iterable parameters() { do { rhs = randomInt(); } while (rhs == 0); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.INTEGER, "lhs"), new TypedData(rhs, DataTypes.INTEGER, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.INTEGER, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.INTEGER, "rhs") + ), "DivIntsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.INTEGER, equalTo(lhs / rhs) @@ -47,8 +51,11 @@ public static Iterable parameters() { do { rhs = randomLong(); } while (rhs == 0); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.LONG, "lhs"), new TypedData(rhs, DataTypes.LONG, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.LONG, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.LONG, "rhs") + ), "DivLongsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.LONG, equalTo(lhs / rhs) @@ -59,8 +66,11 @@ public static Iterable parameters() { do { rhs = randomDouble(); } while (rhs == 0); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.DOUBLE, "lhs"), new TypedData(rhs, DataTypes.DOUBLE, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.DOUBLE, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.DOUBLE, "rhs") + ), "DivDoublesEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.DOUBLE, equalTo(lhs / rhs) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModTests.java index ad47233b29705..425ef2bb11a6b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/ModTests.java @@ -10,6 +10,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import 
org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; @@ -23,7 +24,7 @@ import static org.hamcrest.Matchers.equalTo; public class ModTests extends AbstractArithmeticTestCase { - public ModTests(@Name("TestCase") Supplier testCaseSupplier) { + public ModTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -35,8 +36,11 @@ public static Iterable parameters() { do { rhs = randomInt(); } while (rhs == 0); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.INTEGER, "lhs"), new TypedData(rhs, DataTypes.INTEGER, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.INTEGER, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.INTEGER, "rhs") + ), "ModIntsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.INTEGER, equalTo(lhs % rhs) @@ -47,8 +51,11 @@ public static Iterable parameters() { do { rhs = randomLong(); } while (rhs == 0); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.LONG, "lhs"), new TypedData(rhs, DataTypes.LONG, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.LONG, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.LONG, "rhs") + ), "ModLongsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.LONG, equalTo(lhs % rhs) @@ -59,8 +66,11 @@ public static Iterable parameters() { do { rhs = randomDouble(); } while (rhs == 0); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.DOUBLE, "lhs"), new TypedData(rhs, DataTypes.DOUBLE, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.DOUBLE, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.DOUBLE, "rhs") + ), "ModDoublesEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.DOUBLE, equalTo(lhs % rhs) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulTests.java index 0d03a2dde8c18..2ab72ebf9d5f6 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/MulTests.java @@ -10,6 +10,7 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; @@ -23,7 +24,7 @@ import static org.hamcrest.Matchers.equalTo; public class MulTests extends AbstractArithmeticTestCase { - public MulTests(@Name("TestCase") Supplier testCaseSupplier) { + public MulTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -33,8 +34,11 @@ public static Iterable parameters() { // Ensure we don't have an overflow int rhs = randomIntBetween(-255, 255); int lhs = randomIntBetween(-255, 255); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.INTEGER, "lhs"), new TypedData(rhs, 
DataTypes.INTEGER, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.INTEGER, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.INTEGER, "rhs") + ), "MulIntsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.INTEGER, equalTo(lhs * rhs) @@ -43,8 +47,11 @@ public static Iterable parameters() { // Ensure we don't have an overflow long rhs = randomLongBetween(-1024, 1024); long lhs = randomLongBetween(-1024, 1024); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.LONG, "lhs"), new TypedData(rhs, DataTypes.LONG, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.LONG, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.LONG, "rhs") + ), "MulLongsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.LONG, equalTo(lhs * rhs) @@ -52,8 +59,11 @@ public static Iterable parameters() { }), new TestCaseSupplier("Double * Double", () -> { double rhs = randomDouble(); double lhs = randomDouble(); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.DOUBLE, "lhs"), new TypedData(rhs, DataTypes.DOUBLE, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.DOUBLE, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.DOUBLE, "rhs") + ), "MulDoublesEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.DOUBLE, equalTo(lhs * rhs) diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java index 84795093e0b62..d5cd595ebc44a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/NegTests.java @@ -10,12 +10,19 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.function.scalar.AbstractScalarFunctionTestCase; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; +import org.elasticsearch.xpack.ql.expression.Literal; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; +import java.time.Duration; +import java.time.Period; +import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.function.Supplier; @@ -24,7 +31,7 @@ public class NegTests extends AbstractScalarFunctionTestCase { - public NegTests(@Name("TestCase") Supplier testCaseSupplier) { + public NegTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -33,8 +40,8 @@ public static Iterable parameters() { return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Integer", () -> { // Ensure we don't have an overflow int arg = randomIntBetween((Integer.MIN_VALUE + 1), Integer.MAX_VALUE); - return new TestCase( - List.of(new TypedData(arg, DataTypes.INTEGER, "arg")), + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(arg, DataTypes.INTEGER, "arg")), 
"NegIntsEvaluator[v=Attribute[channel=0]]", DataTypes.INTEGER, equalTo(Math.negateExact(arg)) @@ -42,20 +49,36 @@ public static Iterable parameters() { }), new TestCaseSupplier("Long", () -> { // Ensure we don't have an overflow long arg = randomLongBetween((Long.MIN_VALUE + 1), Long.MAX_VALUE); - return new TestCase( - List.of(new TypedData(arg, DataTypes.LONG, "arg")), + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(arg, DataTypes.LONG, "arg")), "NegLongsEvaluator[v=Attribute[channel=0]]", DataTypes.LONG, equalTo(Math.negateExact(arg)) ); }), new TestCaseSupplier("Double", () -> { double arg = randomDouble(); - return new TestCase( - List.of(new TypedData(arg, DataTypes.DOUBLE, "arg")), + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(arg, DataTypes.DOUBLE, "arg")), "NegDoublesEvaluator[v=Attribute[channel=0]]", DataTypes.DOUBLE, equalTo(-arg) ); + }), new TestCaseSupplier("Duration", () -> { + Duration arg = (Duration) randomLiteral(EsqlDataTypes.TIME_DURATION).value(); + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(arg, EsqlDataTypes.TIME_DURATION, "arg")), + "NegDurationEvaluator[v=Attribute[channel=0]]", + EsqlDataTypes.TIME_DURATION, + equalTo(arg.negated()) + ); + }), new TestCaseSupplier("Period", () -> { + Period arg = (Period) randomLiteral(EsqlDataTypes.DATE_PERIOD).value(); + return new TestCaseSupplier.TestCase( + List.of(new TestCaseSupplier.TypedData(arg, EsqlDataTypes.DATE_PERIOD, "arg")), + "NegPeriodEvaluator[v=Attribute[channel=0]]", + EsqlDataTypes.DATE_PERIOD, + equalTo(arg.negated()) + ); }))); } @@ -67,7 +90,10 @@ protected Expression build(Source source, List args) { @Override protected List argSpec() { // More precisely: numerics without unsigned longs; however, `Neg::resolveType` uses `numeric`. - return List.of(required(numerics())); + List types = new ArrayList<>(Arrays.asList(numerics())); + types.add(EsqlDataTypes.DATE_PERIOD); + types.add(EsqlDataTypes.TIME_DURATION); + return List.of(required(types.toArray(DataType[]::new))); } @Override @@ -112,14 +138,47 @@ public void testEdgeCases() { return; } + if (testCaseType == EsqlDataTypes.DATE_PERIOD) { + Period minPeriod = Period.of(Integer.MIN_VALUE, Integer.MIN_VALUE, Integer.MIN_VALUE); + assertNull(process(minPeriod)); + assertCriticalWarnings( + "Line -1:-1: evaluation of [] failed, treating result as null. Only first 20 failures recorded.", + "java.lang.ArithmeticException: integer overflow" + ); + + Period maxPeriod = Period.of(Integer.MAX_VALUE, Integer.MAX_VALUE, Integer.MAX_VALUE); + Period negatedMaxPeriod = Period.of(-Integer.MAX_VALUE, -Integer.MAX_VALUE, -Integer.MAX_VALUE); + assertEquals(negatedMaxPeriod, process(maxPeriod)); + return; + } + if (testCaseType == EsqlDataTypes.TIME_DURATION) { + Duration minDuration = Duration.ofSeconds(Long.MIN_VALUE, 0); + assertNull(process(minDuration)); + assertCriticalWarnings( + "Line -1:-1: evaluation of [] failed, treating result as null. 
Only first 20 failures recorded.", + "java.lang.ArithmeticException: Exceeds capacity of Duration: 9223372036854775808000000000" + ); + + Duration maxDuration = Duration.ofSeconds(Long.MAX_VALUE, 0); + Duration negatedMaxDuration = Duration.ofSeconds(-Long.MAX_VALUE, 0); + assertEquals(negatedMaxDuration, process(maxDuration)); + + return; + } throw new AssertionError("Edge cases not tested for negation with type [" + testCaseType.typeName() + "]"); } - private Object process(Number val) { - return toJavaObject(evaluator(new Neg(Source.EMPTY, field("val", typeOf(val)))).get().eval(row(List.of(val))), 0); + private Object process(Object val) { + if (testCase.allTypesAreRepresentable()) { + Neg neg = new Neg(Source.EMPTY, field("val", typeOf(val))); + return toJavaObject(evaluator(neg).get().eval(row(List.of(val))), 0); + } else { // just fold if type is not representable + Neg neg = new Neg(Source.EMPTY, new Literal(Source.EMPTY, val, typeOf(val))); + return neg.fold(); + } } - private DataType typeOf(Number val) { + private static DataType typeOf(Object val) { if (val instanceof Integer) { return DataTypes.INTEGER; } @@ -129,6 +188,12 @@ private DataType typeOf(Number val) { if (val instanceof Double) { return DataTypes.DOUBLE; } + if (val instanceof Duration) { + return EsqlDataTypes.TIME_DURATION; + } + if (val instanceof Period) { + return EsqlDataTypes.DATE_PERIOD; + } throw new UnsupportedOperationException("unsupported type [" + val.getClass() + "]"); } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java index d2d0f103d64f6..11496154f0809 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/arithmetic/SubTests.java @@ -10,20 +10,31 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.tree.Source; +import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; import java.math.BigInteger; +import java.time.Duration; +import java.time.Period; +import java.time.temporal.TemporalAmount; import java.util.List; import java.util.function.Supplier; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isDateTimeOrTemporal; +import static org.elasticsearch.xpack.esql.type.EsqlDataTypes.isTemporalAmount; +import static org.elasticsearch.xpack.ql.type.DataTypes.isDateTime; +import static org.elasticsearch.xpack.ql.type.DateUtils.asDateTime; +import static org.elasticsearch.xpack.ql.type.DateUtils.asMillis; import static org.elasticsearch.xpack.ql.util.NumericUtils.asLongUnsigned; import static org.elasticsearch.xpack.ql.util.NumericUtils.unsignedLongAsBigInteger; import static org.hamcrest.Matchers.equalTo; -public class SubTests extends AbstractArithmeticTestCase { - public SubTests(@Name("TestCase") Supplier testCaseSupplier) { +public class SubTests extends AbstractDateTimeArithmeticTestCase { + public SubTests(@Name("TestCase") Supplier testCaseSupplier) { this.testCase = 
testCaseSupplier.get(); } @@ -33,8 +44,11 @@ public static Iterable parameters() { // Ensure we don't have an overflow int rhs = randomIntBetween((Integer.MIN_VALUE >> 1) - 1, (Integer.MAX_VALUE >> 1) - 1); int lhs = randomIntBetween((Integer.MIN_VALUE >> 1) - 1, (Integer.MAX_VALUE >> 1) - 1); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.INTEGER, "lhs"), new TypedData(rhs, DataTypes.INTEGER, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.INTEGER, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.INTEGER, "rhs") + ), "SubIntsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.INTEGER, equalTo(lhs - rhs) @@ -43,8 +57,11 @@ public static Iterable parameters() { // Ensure we don't have an overflow long rhs = randomLongBetween((Long.MIN_VALUE >> 1) - 1, (Long.MAX_VALUE >> 1) - 1); long lhs = randomLongBetween((Long.MIN_VALUE >> 1) - 1, (Long.MAX_VALUE >> 1) - 1); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.LONG, "lhs"), new TypedData(rhs, DataTypes.LONG, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.LONG, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.LONG, "rhs") + ), "SubLongsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.LONG, equalTo(lhs - rhs) @@ -52,8 +69,11 @@ public static Iterable parameters() { }), new TestCaseSupplier("Double - Double", () -> { double rhs = randomDouble(); double lhs = randomDouble(); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.DOUBLE, "lhs"), new TypedData(rhs, DataTypes.DOUBLE, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.DOUBLE, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.DOUBLE, "rhs") + ), "SubDoublesEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.DOUBLE, equalTo(lhs - rhs) @@ -71,9 +91,39 @@ public static Iterable parameters() { "SubUnsignedLongsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", equalTo(asLongUnsigned(lhsBI.subtract(rhsBI).longValue())) ); - }) - */ - )); + }) */, new TestCaseSupplier("Datetime - Period", () -> { + long lhs = (Long) randomLiteral(DataTypes.DATETIME).value(); + Period rhs = (Period) randomLiteral(EsqlDataTypes.DATE_PERIOD).value(); + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.DATETIME, "lhs"), + new TestCaseSupplier.TypedData(rhs, EsqlDataTypes.DATE_PERIOD, "rhs") + ), + "SubDatetimesEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", + DataTypes.DATETIME, + equalTo(asMillis(asDateTime(lhs).minus(rhs))) + ); + }), new TestCaseSupplier("Datetime - Duration", () -> { + long lhs = (Long) randomLiteral(DataTypes.DATETIME).value(); + Duration rhs = (Duration) randomLiteral(EsqlDataTypes.TIME_DURATION).value(); + TestCaseSupplier.TestCase testCase = new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.DATETIME, "lhs"), + new TestCaseSupplier.TypedData(rhs, EsqlDataTypes.TIME_DURATION, "rhs") + ), + "SubDatetimesEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", + DataTypes.DATETIME, + equalTo(asMillis(asDateTime(lhs).minus(rhs))) + ); + return testCase; + }))); + } + + @Override + protected boolean supportsTypes(DataType lhsType, DataType rhsType) { + return isDateTimeOrTemporal(lhsType) || isDateTimeOrTemporal(rhsType) + ? 
isDateTime(lhsType) && isTemporalAmount(rhsType) + : super.supportsTypes(lhsType, rhsType); } @Override @@ -102,4 +152,9 @@ protected long expectedUnsignedLongValue(long lhs, long rhs) { BigInteger rhsBI = unsignedLongAsBigInteger(rhs); return asLongUnsigned(lhsBI.subtract(rhsBI).longValue()); } + + @Override + protected long expectedValue(long datetime, TemporalAmount temporalAmount) { + return asMillis(asDateTime(datetime).minus(temporalAmount)); + } }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractBinaryComparisonTestCase.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractBinaryComparisonTestCase.java index e21b9947271fa..1edb00e474e3c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractBinaryComparisonTestCase.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/AbstractBinaryComparisonTestCase.java @@ -8,7 +8,9 @@ package org.elasticsearch.xpack.esql.expression.predicate.operator.comparison; import org.elasticsearch.xpack.esql.analysis.Verifier; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.esql.expression.predicate.operator.AbstractBinaryOperatorTestCase; +import org.elasticsearch.xpack.esql.type.EsqlDataTypes; import org.elasticsearch.xpack.ql.common.Failure; import org.elasticsearch.xpack.ql.expression.predicate.BinaryOperator; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; @@ -41,7 +43,7 @@ protected final Matcher<Object> resultMatcher(List<Object> data, DataType dataTy } @Override - protected Matcher<Object> resultsMatcher(List<TypedData> typedData) { + protected Matcher<Object> resultsMatcher(List<TestCaseSupplier.TypedData> typedData) { Number lhs = (Number) typedData.get(0).data(); Number rhs = (Number) typedData.get(1).data(); if (typedData.stream().anyMatch(t -> t.type().equals(DataTypes.DOUBLE))) { @@ -69,7 +71,12 @@ protected final boolean supportsType(DataType type) { if (type == DataTypes.BOOLEAN) { return isEquality(); } - return true; + return EsqlDataTypes.isRepresentable(type); + } + + @Override + protected boolean supportsTypes(DataType lhsType, DataType rhsType) { + return super.supportsTypes(lhsType, rhsType) && (lhsType == rhsType || lhsType.isNumeric() && rhsType.isNumeric()); } @Override @@ -94,7 +101,9 @@ protected final void validateType(BinaryOperator<?, ?, ?, ?> op, DataType lhsTyp ); return; } - if (lhsType == rhsType || lhsType.isNumeric() && rhsType.isNumeric()) { + if (lhsType == rhsType + || lhsType.isNumeric() && rhsType.isNumeric() + || DataTypes.isString(lhsType) && DataTypes.isString(rhsType)) { assertThat(op.toString(), f, nullValue()); return; }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsTests.java index 60dcccc0f4a2d..0a1e9bdfaf34b 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/EqualsTests.java @@ -10,9 +10,10 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.Equals; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; import org.hamcrest.Matcher; @@ -23,7 +24,7 @@ import static org.hamcrest.Matchers.equalTo; public class EqualsTests extends AbstractBinaryComparisonTestCase { - public EqualsTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) { + public EqualsTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -32,8 +33,11 @@ public static Iterable<Object[]> parameters() { return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Int == Int", () -> { int rhs = randomInt(); int lhs = randomInt(); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.INTEGER, "lhs"), new TypedData(rhs, DataTypes.INTEGER, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.INTEGER, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.INTEGER, "rhs") + ), "EqualsIntsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.BOOLEAN, equalTo(lhs == rhs)
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java index c108f965f6e68..ad8dba7d63065 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanOrEqualTests.java @@ -10,9 +10,10 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThanOrEqual; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; import org.hamcrest.Matcher; @@ -24,7 +25,7 @@ import static org.hamcrest.Matchers.equalTo; public class GreaterThanOrEqualTests extends AbstractBinaryComparisonTestCase { - public GreaterThanOrEqualTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) { + public GreaterThanOrEqualTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -33,8 +34,11 @@ public static Iterable<Object[]> parameters() { return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Int >= Int", () -> { int rhs = randomInt(); int lhs = randomInt(); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.INTEGER, "lhs"), new TypedData(rhs, DataTypes.INTEGER, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.INTEGER, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.INTEGER, "rhs") + ), "GreaterThanOrEqualIntsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.BOOLEAN, equalTo(lhs >= rhs)
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java index 561cde534e47e..b631a742f7885 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/GreaterThanTests.java @@ -10,9 +10,10 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThan; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; import org.hamcrest.Matcher; @@ -24,7 +25,7 @@ import static org.hamcrest.Matchers.equalTo; public class GreaterThanTests extends AbstractBinaryComparisonTestCase { - public GreaterThanTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) { + public GreaterThanTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -33,8 +34,11 @@ public static Iterable<Object[]> parameters() { return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Int > Int", () -> { int rhs = randomInt(); int lhs = randomInt(); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.INTEGER, "lhs"), new TypedData(rhs, DataTypes.INTEGER, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.INTEGER, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.INTEGER, "rhs") + ), "GreaterThanIntsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.BOOLEAN, equalTo(lhs > rhs)
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java index bec73c260776d..7864a0dda9fe3 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanOrEqualTests.java @@ -10,9 +10,10 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; import org.hamcrest.Matcher; @@ -24,7 +25,7 @@ import static org.hamcrest.Matchers.equalTo; public class LessThanOrEqualTests extends AbstractBinaryComparisonTestCase { - public LessThanOrEqualTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) { + public LessThanOrEqualTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -33,8 +34,11 @@ public static Iterable<Object[]> parameters() { return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Int <= Int", () -> { int rhs = randomInt(); int lhs = randomInt(); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.INTEGER, "lhs"), new TypedData(rhs, DataTypes.INTEGER, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.INTEGER, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.INTEGER, "rhs") + ), "LessThanOrEqualIntsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.BOOLEAN, equalTo(lhs <= rhs)
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java index aa80d08c56605..826e88551077d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/LessThanTests.java @@ -10,9 +10,10 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThan; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; import org.hamcrest.Matcher; @@ -24,7 +25,7 @@ import static org.hamcrest.Matchers.equalTo; public class LessThanTests extends AbstractBinaryComparisonTestCase { - public LessThanTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) { + public LessThanTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -33,8 +34,11 @@ public static Iterable<Object[]> parameters() { return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Int < Int", () -> { int rhs = randomInt(); int lhs = randomInt(); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.INTEGER, "lhs"), new TypedData(rhs, DataTypes.INTEGER, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.INTEGER, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.INTEGER, "rhs") + ), "LessThanIntsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.BOOLEAN, equalTo(lhs < rhs)
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsTests.java index cc25e4169a441..0d6bb32fe2488 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/expression/predicate/operator/comparison/NotEqualsTests.java @@ -10,9 +10,10 @@ import com.carrotsearch.randomizedtesting.annotations.Name; import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; +import org.elasticsearch.xpack.esql.expression.function.TestCaseSupplier; import org.elasticsearch.xpack.ql.expression.Expression; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.NotEquals; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataTypes; import org.hamcrest.Matcher; @@ -24,7 +25,7 @@ import static org.hamcrest.Matchers.equalTo; public class NotEqualsTests extends AbstractBinaryComparisonTestCase { - public NotEqualsTests(@Name("TestCase") Supplier<TestCase> testCaseSupplier) { + public NotEqualsTests(@Name("TestCase") Supplier<TestCaseSupplier.TestCase> testCaseSupplier) { this.testCase = testCaseSupplier.get(); } @@ -33,8 +34,11 @@ public static Iterable<Object[]> parameters() { return parameterSuppliersFromTypedData(List.of(new TestCaseSupplier("Int != Int", () -> { int rhs = randomInt(); int lhs = randomInt(); - return new TestCase( - List.of(new TypedData(lhs, DataTypes.INTEGER, "lhs"), new TypedData(rhs, DataTypes.INTEGER, "rhs")), + return new TestCaseSupplier.TestCase( + List.of( + new TestCaseSupplier.TypedData(lhs, DataTypes.INTEGER, "lhs"), + new TestCaseSupplier.TypedData(rhs, DataTypes.INTEGER, "rhs") + ), "NotEqualsIntsEvaluator[lhs=Attribute[channel=0], rhs=Attribute[channel=1]]", DataTypes.BOOLEAN, equalTo(lhs != rhs)
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java index 8071e2ef932ba..c6759b66ee69d 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatTests.java @@ -19,6 +19,8 @@ import org.elasticsearch.xpack.esql.action.EsqlQueryResponse; import org.elasticsearch.xpack.ql.util.StringUtils; +import java.io.IOException; +import java.io.StringWriter; import java.util.Arrays; import java.util.List; import java.util.Set; @@ -48,20 +50,57 @@ public void testTsvContentType() { } public void testCsvEscaping() { - assertEquals("string", CSV.maybeEscape("string", CSV.delimiter())); - assertEquals("", CSV.maybeEscape("", CSV.delimiter())); - assertEquals("\"\"\"\"", CSV.maybeEscape("\"", CSV.delimiter())); - assertEquals("\"\"\",\"\"\"", CSV.maybeEscape("\",\"", CSV.delimiter())); - assertEquals("\"\"\"quo\"\"ted\"\"\"", CSV.maybeEscape("\"quo\"ted\"", CSV.delimiter())); - assertEquals("\"one;two\"", CSV.maybeEscape("one;two", ';')); + assertEscapedCorrectly("string", CSV, CSV.delimiter(), "string"); + assertEscapedCorrectly("", CSV, CSV.delimiter(), ""); + assertEscapedCorrectly("\"", CSV, CSV.delimiter(), "\"\"\"\""); + assertEscapedCorrectly("\",\"", CSV, CSV.delimiter(), "\"\"\",\"\"\""); + assertEscapedCorrectly("\"quo\"ted\"", CSV, CSV.delimiter(), "\"\"\"quo\"\"ted\"\"\""); + assertEscapedCorrectly("one;two", CSV, ';', "\"one;two\""); + assertEscapedCorrectly("one\ntwo", CSV,
CSV.delimiter(), "\"one\ntwo\""); + assertEscapedCorrectly("one\rtwo", CSV, CSV.delimiter(), "\"one\rtwo\""); + + final String inputString = randomStringForEscaping(); + final String expectedResult; + if (inputString.contains(",") || inputString.contains("\n") || inputString.contains("\r") || inputString.contains("\"")) { + expectedResult = "\"" + inputString.replaceAll("\"", "\"\"") + "\""; + } else { + expectedResult = inputString; + } + assertEscapedCorrectly(inputString, CSV, ',', expectedResult); } public void testTsvEscaping() { - assertEquals("string", TSV.maybeEscape("string", null)); - assertEquals("", TSV.maybeEscape("", null)); - assertEquals("\"", TSV.maybeEscape("\"", null)); - assertEquals("\\t", TSV.maybeEscape("\t", null)); - assertEquals("\\n\"\\t", TSV.maybeEscape("\n\"\t", null)); + assertEscapedCorrectly("string", TSV, null, "string"); + assertEscapedCorrectly("", TSV, null, ""); + assertEscapedCorrectly("\"", TSV, null, "\""); + assertEscapedCorrectly("\t", TSV, null, "\\t"); + assertEscapedCorrectly("\n\"\t", TSV, null, "\\n\"\\t"); + + final var inputString = randomStringForEscaping(); + final var expectedResult = new StringBuilder(); + for (int i = 0; i < inputString.length(); i++) { + final var c = inputString.charAt(i); + switch (c) { + case '\n' -> expectedResult.append("\\n"); + case '\t' -> expectedResult.append("\\t"); + default -> expectedResult.append(c); + } + } + assertEscapedCorrectly(inputString, TSV, null, expectedResult.toString()); + } + + private static String randomStringForEscaping() { + return String.join("", randomList(20, () -> randomFrom("a", "b", ",", ";", "\n", "\r", "\t", "\""))); + } + + private static void assertEscapedCorrectly(String inputString, TextFormat format, Character delimiter, String expectedString) { + try (var writer = new StringWriter()) { + format.writeEscaped(inputString, delimiter, writer); + writer.flush(); + assertEquals(expectedString, writer.toString()); + } catch (IOException e) { + throw new AssertionError("impossible", e); + } } public void testCsvFormatWithEmptyData() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java index 8fa881c4e4dd5..fa0b01dc3366a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/formatter/TextFormatterTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.esql.formatter; import org.apache.lucene.util.BytesRef; +import org.elasticsearch.common.Strings; import org.elasticsearch.compute.data.Block; import org.elasticsearch.compute.data.BytesRefBlock; import org.elasticsearch.compute.data.DoubleArrayVector; @@ -134,4 +135,32 @@ public void testFormatWithoutHeader() { result[1] ); } + + public void testVeryLongPadding() { + final var smallFieldContent = "is twenty characters"; + final var largeFieldContent = "a".repeat(between(smallFieldContent.length(), 200)); + final var paddingLength = largeFieldContent.length() - smallFieldContent.length(); + assertEquals( + Strings.format(""" + is twenty characters%s + aaaaaaaaaaaaaaaaaaaa%s + """, " ".repeat(paddingLength), "a".repeat(paddingLength)), + getTextBodyContent( + new TextFormatter( + new EsqlQueryResponse( + List.of(new ColumnInfo("foo", "keyword")), + List.of( + new Page( + BytesRefBlock.newBlockBuilder(2) + .appendBytesRef(new BytesRef(smallFieldContent)) + 
.appendBytesRef(new BytesRef(largeFieldContent)) + .build() + ) + ), + randomBoolean() + ) + ).format(false) + ) + ); + } } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java index b4e5f46fd1d68..a5cb0044d91d0 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanNamedTypesTests.java @@ -17,6 +17,13 @@ import org.elasticsearch.test.EqualsHashCodeTestUtils; import org.elasticsearch.xpack.esql.EsqlTestUtils; import org.elasticsearch.xpack.esql.SerializationTestUtils; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.NotEquals; +import org.elasticsearch.xpack.esql.expression.Order; import org.elasticsearch.xpack.esql.expression.function.UnsupportedAttribute; import org.elasticsearch.xpack.esql.expression.function.aggregate.Avg; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; @@ -36,6 +43,7 @@ import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mod; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Sub; +import org.elasticsearch.xpack.esql.expression.predicate.operator.comparison.NullEquals; import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.physical.AggregateExec; import org.elasticsearch.xpack.esql.plan.physical.DissectExec; @@ -65,23 +73,16 @@ import org.elasticsearch.xpack.ql.expression.NameId; import org.elasticsearch.xpack.ql.expression.NamedExpression; import org.elasticsearch.xpack.ql.expression.Nullability; -import org.elasticsearch.xpack.ql.expression.Order; import org.elasticsearch.xpack.ql.expression.function.aggregate.AggregateFunction; import org.elasticsearch.xpack.ql.expression.predicate.operator.arithmetic.ArithmeticOperation; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThan; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThanOrEqual; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.NotEquals; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.NullEquals; import org.elasticsearch.xpack.ql.index.EsIndex; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; import org.elasticsearch.xpack.ql.type.DateEsField; import 
org.elasticsearch.xpack.ql.type.EsField; +import org.elasticsearch.xpack.ql.type.InvalidMappedField; import org.elasticsearch.xpack.ql.type.KeywordEsField; import org.elasticsearch.xpack.ql.type.TextEsField; import org.elasticsearch.xpack.ql.type.UnsupportedEsField; @@ -168,20 +169,19 @@ public void testUnsupportedAttributeSimple() throws IOException { "foo", new UnsupportedEsField("foo", "keyword"), "field not supported", - new NameId(53) + new NameId() ); BytesStreamOutput bso = new BytesStreamOutput(); PlanStreamOutput out = new PlanStreamOutput(bso, planNameRegistry); PlanNamedTypes.writeUnsupportedAttr(out, orig); - var deser = PlanNamedTypes.readUnsupportedAttr(planStreamInput(bso)); + var in = planStreamInput(bso); + var deser = PlanNamedTypes.readUnsupportedAttr(in); EqualsHashCodeTestUtils.checkEqualsAndHashCode(orig, unused -> deser); - assertThat(deser.id(), equalTo(orig.id())); + assertThat(deser.id(), equalTo(in.nameIdFromLongValue(Long.parseLong(orig.id().toString())))); } public void testUnsupportedAttribute() { - Stream.generate(PlanNamedTypesTests::randomUnsupportedAttribute) - .limit(100) - .forEach(PlanNamedTypesTests::assertNamedExpressionAndId); + Stream.generate(PlanNamedTypesTests::randomUnsupportedAttribute).limit(100).forEach(PlanNamedTypesTests::assertNamedExpression); } public void testFieldAttributeSimple() throws IOException { @@ -193,19 +193,20 @@ public void testFieldAttributeSimple() throws IOException { randomEsField(), null, // qualifier, can be null Nullability.TRUE, - new NameId(53), + new NameId(), true // synthetic ); BytesStreamOutput bso = new BytesStreamOutput(); PlanStreamOutput out = new PlanStreamOutput(bso, planNameRegistry); PlanNamedTypes.writeFieldAttribute(out, orig); - var deser = PlanNamedTypes.readFieldAttribute(planStreamInput(bso)); + var in = planStreamInput(bso); + var deser = PlanNamedTypes.readFieldAttribute(in); EqualsHashCodeTestUtils.checkEqualsAndHashCode(orig, unused -> deser); - assertThat(deser.id(), equalTo(orig.id())); + assertThat(deser.id(), equalTo(in.nameIdFromLongValue(Long.parseLong(orig.id().toString())))); } public void testFieldAttribute() { - Stream.generate(PlanNamedTypesTests::randomFieldAttribute).limit(100).forEach(PlanNamedTypesTests::assertNamedExpressionAndId); + Stream.generate(PlanNamedTypesTests::randomFieldAttribute).limit(100).forEach(PlanNamedTypesTests::assertNamedExpression); } public void testKeywordEsFieldSimple() throws IOException { @@ -246,6 +247,19 @@ public void testTextEsField() { Stream.generate(PlanNamedTypesTests::randomTextEsField).limit(100).forEach(PlanNamedTypesTests::assertNamedEsField); } + public void testInvalidMappedFieldSimple() throws IOException { + var orig = new InvalidMappedField("foo", "bar"); + BytesStreamOutput bso = new BytesStreamOutput(); + PlanStreamOutput out = new PlanStreamOutput(bso, planNameRegistry); + PlanNamedTypes.writeInvalidMappedField(out, orig); + var deser = PlanNamedTypes.readInvalidMappedField(planStreamInput(bso)); + EqualsHashCodeTestUtils.checkEqualsAndHashCode(orig, unused -> deser); + } + + public void testInvalidMappedField() { + Stream.generate(PlanNamedTypesTests::randomInvalidMappedField).limit(100).forEach(PlanNamedTypesTests::assertNamedEsField); + } + public void testEsDateFieldSimple() throws IOException { var orig = DateEsField.dateEsField("birth_date", Map.of(), false); BytesStreamOutput bso = new BytesStreamOutput(); @@ -339,9 +353,10 @@ public void testAliasSimple() throws IOException { BytesStreamOutput bso = new BytesStreamOutput(); 
PlanStreamOutput out = new PlanStreamOutput(bso, planNameRegistry); PlanNamedTypes.writeAlias(out, orig); - var deser = PlanNamedTypes.readAlias(planStreamInput(bso)); + var in = planStreamInput(bso); + var deser = PlanNamedTypes.readAlias(in); EqualsHashCodeTestUtils.checkEqualsAndHashCode(orig, unused -> deser); - assertThat(orig.id(), equalTo(deser.id())); + assertThat(deser.id(), equalTo(in.nameIdFromLongValue(Long.parseLong(orig.id().toString())))); } public void testLiteralSimple() throws IOException { @@ -358,7 +373,7 @@ public void testOrderSimple() throws IOException { BytesStreamOutput bso = new BytesStreamOutput(); PlanStreamOutput out = new PlanStreamOutput(bso, planNameRegistry); PlanNamedTypes.writeOrder(out, orig); - var deser = PlanNamedTypes.readOrder(planStreamInput(bso)); + var deser = (Order) PlanNamedTypes.readOrder(planStreamInput(bso)); EqualsHashCodeTestUtils.checkEqualsAndHashCode(orig, unused -> deser); } @@ -390,10 +405,9 @@ public void testDissectParserSimple() throws IOException { EqualsHashCodeTestUtils.checkEqualsAndHashCode(orig, unused -> deser); } - private static void assertNamedExpressionAndId(NamedExpression origObj) { + private static void assertNamedExpression(NamedExpression origObj) { var deserObj = serializeDeserialize(origObj, PlanStreamOutput::writeExpression, PlanStreamInput::readNamedExpression); EqualsHashCodeTestUtils.checkEqualsAndHashCode(origObj, unused -> deserObj); - assertThat(deserObj.id(), equalTo(origObj.id())); } private static void assertNamedType(Class type, T origObj) { @@ -454,6 +468,13 @@ static TextEsField randomTextEsField() { ); } + static InvalidMappedField randomInvalidMappedField() { + return new InvalidMappedField( + randomAlphaOfLength(randomIntBetween(1, 25)), // name + randomAlphaOfLength(randomIntBetween(1, 25)) // error message + ); + } + static BinaryComparison randomBinaryComparison() { int v = randomIntBetween(0, 6); var left = field(randomName(), randomDataType()); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInputTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInputTests.java new file mode 100644 index 0000000000000..fe885931d3114 --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/io/stream/PlanStreamInputTests.java @@ -0,0 +1,89 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.io.stream; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.ql.expression.NameId; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + +public class PlanStreamInputTests extends ESTestCase { + + public void testMapperSimple() { + var mapper = new PlanStreamInput.NameIdMapper(); + + NameId first = mapper.apply(1L); + NameId second = mapper.apply(1L); + assertThat(second, equalTo(first)); + + NameId third = mapper.apply(2L); + NameId fourth = mapper.apply(2L); + assertThat(third, not(equalTo(second))); + assertThat(fourth, equalTo(third)); + + assertThat(mapper.seen.size(), is(2)); + } + + public void testMapper() { + List<Long> longs = randomLongsListOfSize(100); + List<Long> nameIds = new ArrayList<>(); + for (long l : longs) { + nameIds.add(l); + if (randomBoolean()) { // randomly insert additional values from the known list + int idx = randomIntBetween(0, longs.size() - 1); + nameIds.add(longs.get(idx)); + } + } + + var mapper = new PlanStreamInput.NameIdMapper(); + List<NameId> mappedIds = nameIds.stream().map(mapper::apply).toList(); + assertThat(mappedIds.size(), is(nameIds.size())); + // there must be exactly 100 distinct elements + assertThat(mapper.seen.size(), is(100)); + assertThat(mappedIds.stream().distinct().count(), is(100L)); + + // The pre-mapped name id pattern must match that of the mapped one + Map<Long, List<Long>> nameIdsSeen = new LinkedHashMap<>(); // insertion order + for (int i = 0; i < nameIds.size(); i++) { + long value = nameIds.get(i); + nameIdsSeen.computeIfAbsent(value, k -> new ArrayList<>()); + nameIdsSeen.get(value).add((long) i); + } + assert nameIdsSeen.size() == 100; + + Map<NameId, List<Long>> mappedSeen = new LinkedHashMap<>(); // insertion order + for (int i = 0; i < mappedIds.size(); i++) { + NameId nameId = mappedIds.get(i); + mappedSeen.computeIfAbsent(nameId, k -> new ArrayList<>()); + mappedSeen.get(nameId).add((long) i); + } + assert mappedSeen.size() == 100; + + var mappedSeenItr = mappedSeen.values().iterator(); + for (List<Long> indexes : nameIdsSeen.values()) { + assertThat(indexes, equalTo(mappedSeenItr.next())); + } + } + + List<Long> randomLongsListOfSize(int size) { + Set<Long> longs = new HashSet<>(); + while (longs.size() < size) { + longs.add(randomLong()); + } + return longs.stream().toList(); + } +}
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java index f9a7e9d906c29..f242c1e082829 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LocalLogicalPlanOptimizerTests.java @@ -246,7 +246,7 @@ public void testMissingFieldInFilterNoProjection() { var local = as(localPlan, LocalRelation.class); assertThat( Expressions.names(local.output()), - contains("_meta_field", "emp_no", "first_name", "gender", "languages", "last_name", "salary", "x") + contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "salary", "x") ); }
diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java index e936cef3c7a67..b6d818038a72c 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/LogicalPlanOptimizerTests.java @@ -17,6 +17,13 @@ import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import org.elasticsearch.xpack.esql.analysis.Verifier; import org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.regex.RLike; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.regex.WildcardLike; +import org.elasticsearch.xpack.esql.expression.Order; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.expression.function.aggregate.Count; import org.elasticsearch.xpack.esql.expression.function.aggregate.Percentile; @@ -48,17 +55,12 @@ import org.elasticsearch.xpack.ql.expression.Literal; import org.elasticsearch.xpack.ql.expression.NamedExpression; import org.elasticsearch.xpack.ql.expression.Nullability; -import org.elasticsearch.xpack.ql.expression.Order; import org.elasticsearch.xpack.ql.expression.ReferenceAttribute; import org.elasticsearch.xpack.ql.expression.predicate.logical.And; import org.elasticsearch.xpack.ql.expression.predicate.logical.Or; import org.elasticsearch.xpack.ql.expression.predicate.nulls.IsNotNull; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThan; -import org.elasticsearch.xpack.ql.expression.predicate.regex.RLike; -import org.elasticsearch.xpack.ql.expression.predicate.regex.WildcardLike; +import org.elasticsearch.xpack.ql.expression.predicate.regex.RLikePattern; +import org.elasticsearch.xpack.ql.expression.predicate.regex.WildcardPattern; import org.elasticsearch.xpack.ql.index.EsIndex; import org.elasticsearch.xpack.ql.index.IndexResolution; import org.elasticsearch.xpack.ql.plan.logical.Aggregate; @@ -86,12 +88,7 @@ import static org.elasticsearch.xpack.esql.EsqlTestUtils.emptySource; import static org.elasticsearch.xpack.esql.EsqlTestUtils.loadMapping; import static org.elasticsearch.xpack.esql.EsqlTestUtils.localSource; -import static org.elasticsearch.xpack.ql.TestUtils.greaterThanOf; -import static org.elasticsearch.xpack.ql.TestUtils.greaterThanOrEqualOf; -import static org.elasticsearch.xpack.ql.TestUtils.lessThanOf; import static org.elasticsearch.xpack.ql.TestUtils.relation; -import static org.elasticsearch.xpack.ql.TestUtils.rlike; -import static org.elasticsearch.xpack.ql.TestUtils.wildcardLike; import static org.elasticsearch.xpack.ql.tree.Source.EMPTY; import static org.elasticsearch.xpack.ql.type.DataTypes.INTEGER; import static org.hamcrest.Matchers.contains; @@ -286,6 +283,18 @@ public void 
testMultipleCombineLimits() { assertEquals(new Limit(EMPTY, L(minimum), relation), new LogicalPlanOptimizer().optimize(plan)); } + public static GreaterThan greaterThanOf(Expression left, Expression right) { + return new GreaterThan(EMPTY, left, right, randomZone()); + } + + public static LessThan lessThanOf(Expression left, Expression right) { + return new LessThan(EMPTY, left, right, randomZone()); + } + + public static GreaterThanOrEqual greaterThanOrEqualOf(Expression left, Expression right) { + return new GreaterThanOrEqual(EMPTY, left, right, randomZone()); + } + public void testCombineFilters() { EsRelation relation = relation(); GreaterThan conditionA = greaterThanOf(getFieldAttribute("a"), ONE); @@ -330,7 +339,7 @@ public void testPushDownFilter() { public void testPushDownLikeRlikeFilter() { EsRelation relation = relation(); - RLike conditionA = rlike(getFieldAttribute("a"), "foo"); + org.elasticsearch.xpack.ql.expression.predicate.regex.RLike conditionA = rlike(getFieldAttribute("a"), "foo"); WildcardLike conditionB = wildcardLike(getFieldAttribute("b"), "bar"); Filter fa = new Filter(EMPTY, relation, conditionA); @@ -1418,6 +1427,43 @@ public void testSplittingInWithFoldableValue() { assertThat(new LogicalPlanOptimizer.SplitInWithFoldableValue().rule(in), equalTo(expected)); } + public void testReplaceFilterWithExact() { + var plan = plan(""" + from test + | where job == "foo" + """); + + var limit = as(plan, Limit.class); + var filter = as(limit.child(), Filter.class); + Equals equals = as(filter.condition(), Equals.class); + FieldAttribute left = as(equals.left(), FieldAttribute.class); + assertThat(left.name(), equalTo("job.raw")); + } + + public void testReplaceExpressionWithExact() { + var plan = plan(""" + from test + | eval x = job + """); + + var eval = as(plan, Eval.class); + var alias = as(eval.fields().get(0), Alias.class); + var field = as(alias.child(), FieldAttribute.class); + assertThat(field.name(), equalTo("job")); + } + + public void testReplaceSortWithExact() { + var plan = plan(""" + from test + | sort job + """); + + var topN = as(plan, TopN.class); + assertThat(topN.order().size(), equalTo(1)); + var sortField = as(topN.order().get(0).child(), FieldAttribute.class); + assertThat(sortField.name(), equalTo("job.raw")); + } + public void testPruneUnusedEval() { var plan = plan(""" from test @@ -1616,4 +1662,13 @@ private static FieldAttribute getFieldAttribute(String name) { private static FieldAttribute getFieldAttribute(String name, DataType dataType) { return new FieldAttribute(EMPTY, name, new EsField(name + "f", dataType, emptyMap(), true)); } + + public static WildcardLike wildcardLike(Expression left, String exp) { + return new WildcardLike(EMPTY, left, new WildcardPattern(exp)); + } + + public static RLike rlike(Expression left, String exp) { + return new RLike(EMPTY, left, new RLikePattern(exp)); + } + } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java index 1aa23789a96c3..83d71d2cc3ed1 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/optimizer/PhysicalPlanOptimizerTests.java @@ -27,6 +27,11 @@ import org.elasticsearch.xpack.esql.analysis.EnrichResolution; import org.elasticsearch.xpack.esql.analysis.Verifier; import 
org.elasticsearch.xpack.esql.enrich.EnrichPolicyResolution; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Round; import org.elasticsearch.xpack.esql.parser.EsqlParser; @@ -62,16 +67,12 @@ import org.elasticsearch.xpack.ql.expression.Attribute; import org.elasticsearch.xpack.ql.expression.Expressions; import org.elasticsearch.xpack.ql.expression.FieldAttribute; +import org.elasticsearch.xpack.ql.expression.Literal; import org.elasticsearch.xpack.ql.expression.MetadataAttribute; import org.elasticsearch.xpack.ql.expression.NamedExpression; import org.elasticsearch.xpack.ql.expression.Order; import org.elasticsearch.xpack.ql.expression.function.FunctionRegistry; import org.elasticsearch.xpack.ql.expression.predicate.logical.Not; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThan; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.ql.index.EsIndex; import org.elasticsearch.xpack.ql.index.IndexResolution; import org.elasticsearch.xpack.ql.type.DataTypes; @@ -79,6 +80,7 @@ import org.elasticsearch.xpack.ql.type.EsField; import org.junit.Before; +import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; @@ -155,7 +157,14 @@ public void init() { mapping = loadMapping("mapping-basic.json"); allFieldRowSize = mapping.values() .stream() - .mapToInt(f -> EstimatesRowSize.estimateSize(EsqlDataTypes.widenSmallNumericTypes(f.getDataType()))) + .mapToInt( + f -> (EstimatesRowSize.estimateSize(EsqlDataTypes.widenSmallNumericTypes(f.getDataType())) + f.getProperties() + .values() + .stream() + // check one more level since the mapping contains TEXT fields with KEYWORD multi-fields + .mapToInt(x -> EstimatesRowSize.estimateSize(EsqlDataTypes.widenSmallNumericTypes(x.getDataType()))) + .sum()) + ) .sum(); EsIndex test = new EsIndex("test", mapping); IndexResolution getIndexResult = IndexResolution.valid(test); @@ -205,13 +214,25 @@ public void testSingleFieldExtractor() { var filter = as(limit.child(), FilterExec.class); var extract = as(filter.child(), FieldExtractExec.class); - assertEquals(Sets.difference(mapping.keySet(), Set.of("emp_no")), Sets.newHashSet(names(restExtract.attributesToExtract()))); + assertEquals(Sets.difference(allFields(mapping), Set.of("emp_no")), Sets.newHashSet(names(restExtract.attributesToExtract()))); assertEquals(Set.of("emp_no"), Sets.newHashSet(names(extract.attributesToExtract()))); var query = as(extract.child(), EsQueryExec.class); assertThat(query.estimatedRowSize(), equalTo(Integer.BYTES + allFieldRowSize)); } + private Set allFields(Map mapping) { + Set result = new HashSet<>(); + for (Map.Entry entry : mapping.entrySet()) 
{ + String key = entry.getKey(); + result.add(key); + for (Map.Entry sub : entry.getValue().getProperties().entrySet()) { + result.add(key + "." + sub.getKey()); + } + } + return result; + } + public void testExactlyOneExtractorPerFieldWithPruning() { var plan = physicalPlan(""" from test @@ -229,7 +250,7 @@ public void testExactlyOneExtractorPerFieldWithPruning() { var filter = as(limit.child(), FilterExec.class); var extract = as(filter.child(), FieldExtractExec.class); - assertEquals(Sets.difference(mapping.keySet(), Set.of("emp_no")), Sets.newHashSet(names(restExtract.attributesToExtract()))); + assertEquals(Sets.difference(allFields(mapping), Set.of("emp_no")), Sets.newHashSet(names(restExtract.attributesToExtract()))); assertThat(names(extract.attributesToExtract()), contains("emp_no")); var query = source(extract.child()); @@ -370,7 +391,7 @@ public void testExtractorMultiEvalWithDifferentNames() { var extract = as(project.child(), FieldExtractExec.class); assertThat( names(extract.attributesToExtract()), - contains("_meta_field", "emp_no", "first_name", "gender", "languages", "last_name", "salary") + contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "salary") ); } @@ -400,7 +421,7 @@ public void testExtractorMultiEvalWithSameName() { var extract = as(project.child(), FieldExtractExec.class); assertThat( names(extract.attributesToExtract()), - contains("_meta_field", "emp_no", "first_name", "gender", "languages", "last_name", "salary") + contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "salary") ); } @@ -857,7 +878,7 @@ public void testPushLimitAndFilterToSource() { assertThat( names(extract.attributesToExtract()), - contains("_meta_field", "emp_no", "first_name", "gender", "languages", "last_name", "salary") + contains("_meta_field", "emp_no", "first_name", "gender", "job", "job.raw", "languages", "last_name", "salary") ); var source = source(extract.child()); @@ -1645,6 +1666,73 @@ public void testDontPushDownMetadataVersionAndId() { } } + public void testNoTextFilterPushDown() { + var plan = physicalPlan(""" + from test + | where gender == "M" + """); + + var optimized = optimizedPlan(plan); + var limit = as(optimized, LimitExec.class); + var exchange = asRemoteExchange(limit.child()); + var project = as(exchange.child(), ProjectExec.class); + var extract = as(project.child(), FieldExtractExec.class); + var limit2 = as(extract.child(), LimitExec.class); + var filter = as(limit2.child(), FilterExec.class); + var extract2 = as(filter.child(), FieldExtractExec.class); + var source = source(extract2.child()); + assertNull(source.query()); + } + + public void testTextWithRawFilterPushDown() { + var plan = physicalPlan(""" + from test + | where job == "foo" + """); + + var optimized = optimizedPlan(plan); + var limit = as(optimized, LimitExec.class); + var exchange = asRemoteExchange(limit.child()); + var project = as(exchange.child(), ProjectExec.class); + var extract = as(project.child(), FieldExtractExec.class); + var source = as(extract.child(), EsQueryExec.class); + var qb = as(source.query(), SingleValueQuery.Builder.class); + assertThat(qb.field(), equalTo("job.raw")); + } + + public void testNoTextSortPushDown() { + var plan = physicalPlan(""" + from test + | sort gender + """); + + var optimized = optimizedPlan(plan); + var topN = as(optimized, TopNExec.class); + var exchange = as(topN.child(), ExchangeExec.class); + var project = as(exchange.child(), ProjectExec.class); + 
var extract = as(project.child(), FieldExtractExec.class); + var topN2 = as(extract.child(), TopNExec.class); + var extract2 = as(topN2.child(), FieldExtractExec.class); + var source = source(extract2.child()); + assertNull(source.sorts()); + } + + public void testTextWithRawSortPushDown() { + var plan = physicalPlan(""" + from test + | sort job + """); + + var optimized = optimizedPlan(plan); + var topN = as(optimized, TopNExec.class); + var exchange = as(topN.child(), ExchangeExec.class); + var project = as(exchange.child(), ProjectExec.class); + var extract = as(project.child(), FieldExtractExec.class); + var source = as(extract.child(), EsQueryExec.class); + assertThat(source.sorts().size(), equalTo(1)); + assertThat(source.sorts().get(0).field().name(), equalTo("job.raw")); + } + public void testFieldExtractWithoutSourceAttributes() { PhysicalPlan verifiedPlan = optimizedPlan(physicalPlan(""" from test @@ -1697,6 +1785,49 @@ public void testProjectAllFieldsWhenOnlyTheCountMatters() { var source = source(eval.child()); } + /** + * ProjectExec[[a{r}#5]] + * \_EvalExec[[__a_SUM@81823521{r}#15 / __a_COUNT@31645621{r}#16 AS a]] + * \_LimitExec[10000[INTEGER]] + * \_AggregateExec[[],[SUM(salary{f}#11) AS __a_SUM@81823521, COUNT(salary{f}#11) AS __a_COUNT@31645621],FINAL,24] + * \_AggregateExec[[],[SUM(salary{f}#11) AS __a_SUM@81823521, COUNT(salary{f}#11) AS __a_COUNT@31645621],PARTIAL,16] + * \_LimitExec[10[INTEGER]] + * \_ExchangeExec[[],false] + * \_ProjectExec[[salary{f}#11]] + * \_FieldExtractExec[salary{f}#11] + * \_EsQueryExec[test], query[][_doc{f}#17], limit[10], sort[] estimatedRowSize[8] + */ + public void testAvgSurrogateFunctionAfterRenameAndLimit() { + var plan = optimizedPlan(physicalPlan(""" + from test + | limit 10 + | rename first_name as FN + | stats a = avg(salary) + """)); + + var project = as(plan, ProjectExec.class); + var eval = as(project.child(), EvalExec.class); + var limit = as(eval.child(), LimitExec.class); + assertThat(limit.limit(), instanceOf(Literal.class)); + assertThat(limit.limit().fold(), equalTo(10000)); + var aggFinal = as(limit.child(), AggregateExec.class); + assertThat(aggFinal.getMode(), equalTo(AggregateExec.Mode.FINAL)); + var aggPartial = as(aggFinal.child(), AggregateExec.class); + assertThat(aggPartial.getMode(), equalTo(AggregateExec.Mode.PARTIAL)); + limit = as(aggPartial.child(), LimitExec.class); + assertThat(limit.limit(), instanceOf(Literal.class)); + assertThat(limit.limit().fold(), equalTo(10)); + + var exchange = as(limit.child(), ExchangeExec.class); + project = as(exchange.child(), ProjectExec.class); + var expectedFields = List.of("salary"); + assertThat(Expressions.names(project.projections()), is(expectedFields)); + var fieldExtract = as(project.child(), FieldExtractExec.class); + assertThat(Expressions.names(fieldExtract.attributesToExtract()), is(expectedFields)); + var source = source(fieldExtract.child()); + assertThat(source.limit().fold(), equalTo(10)); + } + private static EsQueryExec source(PhysicalPlan plan) { if (plan instanceof ExchangeExec exchange) { plan = exchange.child(); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java index 1bb88b03b311c..ae5de9a0a13e2 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/ExpressionTests.java @@ -8,6 +8,10 @@ 
package org.elasticsearch.xpack.esql.parser; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Div; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Mul; @@ -24,10 +28,6 @@ import org.elasticsearch.xpack.ql.expression.predicate.logical.And; import org.elasticsearch.xpack.ql.expression.predicate.logical.Not; import org.elasticsearch.xpack.ql.expression.predicate.logical.Or; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.ql.plan.logical.Filter; import org.elasticsearch.xpack.ql.plan.logical.LogicalPlan; import org.elasticsearch.xpack.ql.plan.logical.Project; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java index c7ba7373fcc7a..8b6aef3f2fcc4 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/parser/StatementParserTests.java @@ -9,6 +9,13 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.regex.RLike; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.regex.WildcardLike; import org.elasticsearch.xpack.esql.expression.predicate.operator.arithmetic.Add; import org.elasticsearch.xpack.esql.plan.logical.Dissect; import org.elasticsearch.xpack.esql.plan.logical.Enrich; @@ -29,13 +36,6 @@ import org.elasticsearch.xpack.ql.expression.function.UnresolvedFunction; import org.elasticsearch.xpack.ql.expression.predicate.logical.Not; import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.BinaryComparison; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThan; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThanOrEqual; -import 
org.elasticsearch.xpack.ql.expression.predicate.regex.RLike; -import org.elasticsearch.xpack.ql.expression.predicate.regex.WildcardLike; import org.elasticsearch.xpack.ql.plan.logical.Aggregate; import org.elasticsearch.xpack.ql.plan.logical.Filter; import org.elasticsearch.xpack.ql.plan.logical.Limit; diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java index 8f5aba272357d..f42c86d4b028a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/EvalMapperTests.java @@ -14,6 +14,11 @@ import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.esql.SerializationTestUtils; import org.elasticsearch.xpack.esql.evaluator.EvalMapper; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.Equals; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.GreaterThanOrEqual; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThan; +import org.elasticsearch.xpack.esql.evaluator.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateFormat; import org.elasticsearch.xpack.esql.expression.function.scalar.date.DateTrunc; import org.elasticsearch.xpack.esql.expression.function.scalar.math.Abs; @@ -37,11 +42,6 @@ import org.elasticsearch.xpack.ql.expression.predicate.logical.And; import org.elasticsearch.xpack.ql.expression.predicate.logical.Not; import org.elasticsearch.xpack.ql.expression.predicate.logical.Or; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.Equals; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThan; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.GreaterThanOrEqual; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThan; -import org.elasticsearch.xpack.ql.expression.predicate.operator.comparison.LessThanOrEqual; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; import org.elasticsearch.xpack.ql.type.DataTypes; @@ -121,10 +121,10 @@ public EvalMapperTests(String nodeString, Expression expression) { public void testEvaluatorSuppliers() { Layout.Builder lb = new Layout.Builder(); - lb.appendChannel(DOUBLE1.id()); - lb.appendChannel(DOUBLE2.id()); - lb.appendChannel(DATE.id()); - lb.appendChannel(LONG.id()); + lb.append(DOUBLE1); + lb.append(DOUBLE2); + lb.append(DATE); + lb.append(LONG); Layout layout = lb.build(); Supplier supplier = EvalMapper.toEvaluator(expression, layout); diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/FilterTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/FilterTests.java new file mode 100644 index 0000000000000..bba7d4a5dd47d --- /dev/null +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/FilterTests.java @@ -0,0 +1,295 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.esql.planner; + +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.ByteBufferStreamInput; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.logging.LoggerMessageFormat; +import org.elasticsearch.index.query.AbstractQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.EsqlTestUtils; +import org.elasticsearch.xpack.esql.SerializationTestUtils; +import org.elasticsearch.xpack.esql.analysis.Analyzer; +import org.elasticsearch.xpack.esql.analysis.AnalyzerContext; +import org.elasticsearch.xpack.esql.analysis.Verifier; +import org.elasticsearch.xpack.esql.expression.function.EsqlFunctionRegistry; +import org.elasticsearch.xpack.esql.optimizer.LogicalPlanOptimizer; +import org.elasticsearch.xpack.esql.optimizer.PhysicalOptimizerContext; +import org.elasticsearch.xpack.esql.optimizer.PhysicalPlanOptimizer; +import org.elasticsearch.xpack.esql.parser.EsqlParser; +import org.elasticsearch.xpack.esql.plan.physical.FragmentExec; +import org.elasticsearch.xpack.esql.plan.physical.PhysicalPlan; +import org.elasticsearch.xpack.esql.querydsl.query.SingleValueQuery; +import org.elasticsearch.xpack.esql.stats.Metrics; +import org.elasticsearch.xpack.ql.index.EsIndex; +import org.elasticsearch.xpack.ql.index.IndexResolution; +import org.elasticsearch.xpack.ql.type.EsField; +import org.elasticsearch.xpack.ql.util.Queries; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.util.Map; + +import static java.util.Arrays.asList; +import static org.elasticsearch.index.query.QueryBuilders.rangeQuery; +import static org.elasticsearch.xpack.esql.EsqlTestUtils.loadMapping; +import static org.elasticsearch.xpack.esql.SerializationTestUtils.assertSerialization; +import static org.elasticsearch.xpack.ql.util.Queries.Clause.FILTER; +import static org.elasticsearch.xpack.ql.util.Queries.Clause.MUST; +import static org.elasticsearch.xpack.ql.util.Queries.Clause.SHOULD; +import static org.hamcrest.Matchers.nullValue; + +public class FilterTests extends ESTestCase { + + // use a field that already exists in the mapping + private static final String AT_TIMESTAMP = "emp_no"; + private static final String OTHER_FIELD = "salary"; + + private static EsqlParser parser; + private static Analyzer analyzer; + private static LogicalPlanOptimizer logicalOptimizer; + private static PhysicalPlanOptimizer physicalPlanOptimizer; + private static Map<String, EsField> mapping; + private static Mapper mapper; + + @BeforeClass + public static void init() { + parser = new EsqlParser(); + + mapping = loadMapping("mapping-basic.json"); + EsIndex test = new EsIndex("test", mapping); + IndexResolution getIndexResult = IndexResolution.valid(test); + logicalOptimizer = new LogicalPlanOptimizer(); + physicalPlanOptimizer = new PhysicalPlanOptimizer(new PhysicalOptimizerContext(EsqlTestUtils.TEST_CFG)); + mapper = new Mapper(false); + + analyzer = new Analyzer( + new AnalyzerContext(EsqlTestUtils.TEST_CFG, new EsqlFunctionRegistry(), getIndexResult, EsqlTestUtils.emptyPolicyResolution()), + new Verifier(new Metrics()) + ); + } + + public void testTimestampRequestFilterNoQueryFilter() { + var restFilter = restFilterQuery(AT_TIMESTAMP); + + var plan = plan(LoggerMessageFormat.format(null, """ + FROM test + |WHERE {} > 10 + """, OTHER_FIELD), restFilter); + + var filter = filterQueryForTransportNodes(plan); + assertEquals(restFilter.toString(), filter.toString()); + } + + public void testTimestampNoRequestFilterQueryFilter() { + var value = 10; + + var plan = plan(LoggerMessageFormat.format(null, """ + FROM test + |WHERE {} > {} + """, AT_TIMESTAMP, value), null); + + var filter = filterQueryForTransportNodes(plan); + var expected = singleValueQuery(rangeQuery(AT_TIMESTAMP).gt(value), AT_TIMESTAMP); + assertEquals(expected.toString(), filter.toString()); + } + + public void testTimestampRequestFilterQueryFilter() { + var value = 10; + var restFilter = restFilterQuery(AT_TIMESTAMP); + + var plan = plan(LoggerMessageFormat.format(null, """ + FROM test + |WHERE {} > {} + """, AT_TIMESTAMP, value), restFilter); + + var filter = filterQueryForTransportNodes(plan); + var queryFilter = singleValueQuery(rangeQuery(AT_TIMESTAMP).gt(value).includeUpper(false), AT_TIMESTAMP); + var expected = Queries.combine(FILTER, asList(restFilter, queryFilter)); + assertEquals(expected.toString(), filter.toString()); + } + + public void testTimestampRequestFilterQueryFilterWithConjunction() { + var lowValue = 10; + var highValue = 100; + var restFilter = restFilterQuery(AT_TIMESTAMP); + + var plan = plan(LoggerMessageFormat.format(null, """ + FROM test + |WHERE {} > {} AND {} < {} + """, AT_TIMESTAMP, lowValue, AT_TIMESTAMP, highValue), restFilter); + + var filter = filterQueryForTransportNodes(plan); + var left = singleValueQuery(rangeQuery(AT_TIMESTAMP).gt(lowValue), AT_TIMESTAMP); + var right = singleValueQuery(rangeQuery(AT_TIMESTAMP).lt(highValue), AT_TIMESTAMP); + var must = Queries.combine(MUST, asList(left, right)); + var expected = Queries.combine(FILTER, asList(restFilter, must)); + assertEquals(expected.toString(), filter.toString()); + } + + public void testTimestampRequestFilterQueryFilterWithDisjunctionOnDifferentFields() { + var lowValue = 10; + var highValue = 100; + var restFilter = restFilterQuery(AT_TIMESTAMP); + + var plan = plan(LoggerMessageFormat.format(null, """ + FROM test + |WHERE {} > {} OR {} < {} + """, OTHER_FIELD, lowValue, AT_TIMESTAMP, highValue), restFilter); + + var filter = filterQueryForTransportNodes(plan); + var expected = restFilter; + assertEquals(expected.toString(), filter.toString()); + } + + public void testTimestampRequestFilterQueryFilterWithDisjunctionOnSameField() { + var lowValue = 10; + var highValue = 100; + var restFilter = restFilterQuery(AT_TIMESTAMP); + + var plan = plan(LoggerMessageFormat.format(null, """ + FROM test + |WHERE {} > {} OR {} < {} + """, AT_TIMESTAMP, lowValue, AT_TIMESTAMP, highValue), restFilter); + + var filter = filterQueryForTransportNodes(plan); + var left = singleValueQuery(rangeQuery(AT_TIMESTAMP).gt(lowValue), AT_TIMESTAMP); + var right = singleValueQuery(rangeQuery(AT_TIMESTAMP).lt(highValue), AT_TIMESTAMP); + var should = Queries.combine(SHOULD, asList(left, right)); + var expected = Queries.combine(FILTER, asList(restFilter, should)); + assertEquals(expected.toString(), filter.toString()); + } + + public void testTimestampRequestFilterQueryFilterWithMultiConjunction() { + var lowValue = 10; + var highValue = 100; + var eqValue = 1234; + var restFilter = restFilterQuery(AT_TIMESTAMP); + + var plan = plan(LoggerMessageFormat.format(null, """ + FROM test + |WHERE {} > {} AND {} == {} AND {} < {} + """, AT_TIMESTAMP, lowValue, OTHER_FIELD, eqValue, AT_TIMESTAMP, highValue), restFilter); + + var filter = filterQueryForTransportNodes(plan); + var left = singleValueQuery(rangeQuery(AT_TIMESTAMP).gt(lowValue), AT_TIMESTAMP); + var right = singleValueQuery(rangeQuery(AT_TIMESTAMP).lt(highValue), AT_TIMESTAMP); + var must = Queries.combine(MUST, asList(left, right)); + var expected = Queries.combine(FILTER, asList(restFilter, must)); + assertEquals(expected.toString(), filter.toString()); + } + + public void testTimestampRequestFilterQueryMultipleFilters() { + var lowValue = 10; + var eqValue = 1234; + var highValue = 100; + + var restFilter = restFilterQuery(AT_TIMESTAMP); + + var plan = plan(LoggerMessageFormat.format(null, """ + FROM test + |WHERE {} > {} + |EVAL {} = {} + |WHERE {} > {} + """, AT_TIMESTAMP, lowValue, AT_TIMESTAMP, eqValue, AT_TIMESTAMP, highValue), restFilter); + + var filter = filterQueryForTransportNodes(plan); + var queryFilter = singleValueQuery(rangeQuery(AT_TIMESTAMP).gt(lowValue), AT_TIMESTAMP); + var expected = Queries.combine(FILTER, asList(restFilter, queryFilter)); + assertEquals(expected.toString(), filter.toString()); + } + + public void testTimestampOverriddenFilterFilter() { + var eqValue = 1234; + + var plan = plan(LoggerMessageFormat.format(null, """ + FROM test + |EVAL {} = {} + |WHERE {} > {} + """, AT_TIMESTAMP, OTHER_FIELD, AT_TIMESTAMP, eqValue), null); + + var filter = filterQueryForTransportNodes(plan); + assertThat(filter, nullValue()); + } + + public void testTimestampAsFunctionArgument() { + var eqValue = 1234; + + var plan = plan(LoggerMessageFormat.format(null, """ + FROM test + |WHERE to_int(to_string({})) == {} + """, AT_TIMESTAMP, eqValue), null); + + var filter = filterQueryForTransportNodes(plan); + assertThat(filter, nullValue()); + } + + public void testTimestampAsFunctionArgumentInsideExpression() { + var eqValue = 1234; + + var plan = plan(LoggerMessageFormat.format(null, """ + FROM test + |WHERE to_int(to_string({})) + 987 == {} + """, AT_TIMESTAMP, eqValue), null); + + var filter = filterQueryForTransportNodes(plan); + assertThat(filter, nullValue()); + } + + /** + * Ugly hack to create a QueryBuilder for SingleValueQuery. + * For some reason, however, the queryName is set to null on range queries when deserializing.
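+ * The byte layout written below is assumed to mirror AbstractQueryBuilder#writeTo: + * the boost first, then the optional query name, then the query-specific fields (here the wrapped query and the target field).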
+ */ + public static QueryBuilder singleValueQuery(QueryBuilder inner, String field) { + try (BytesStreamOutput out = new BytesStreamOutput()) { + // emulate SingleValueQuery writeTo + out.writeFloat(AbstractQueryBuilder.DEFAULT_BOOST); + out.writeOptionalString(null); + out.writeNamedWriteable(inner); + out.writeString(field); + + StreamInput in = new NamedWriteableAwareStreamInput( + ByteBufferStreamInput.wrap(BytesReference.toBytes(out.bytes())), + SerializationTestUtils.writableRegistry() + ); + + Object obj = SingleValueQuery.ENTRY.reader.read(in); + return (QueryBuilder) obj; + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + private PhysicalPlan plan(String query, QueryBuilder restFilter) { + var logical = logicalOptimizer.optimize(analyzer.analyze(parser.createStatement(query))); + // System.out.println("Logical\n" + logical); + var physical = mapper.map(logical); + // System.out.println("physical\n" + physical); + physical = physical.transformUp( + FragmentExec.class, + f -> new FragmentExec(f.source(), f.fragment(), restFilter, f.estimatedRowSize()) + ); + physical = physicalPlanOptimizer.optimize(physical); + // System.out.println("optimized\n" + physical); + assertSerialization(physical); + return physical; + } + + private QueryBuilder restFilterQuery(String field) { + return rangeQuery(field).lt("2020-12-34"); + } + + private QueryBuilder filterQueryForTransportNodes(PhysicalPlan plan) { + return PlannerUtils.detectFilter(plan, AT_TIMESTAMP); + } +} diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java index dab7b3ee41922..b1965f19e44f5 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/LocalExecutionPlannerTests.java @@ -23,7 +23,9 @@ import org.elasticsearch.compute.lucene.LuceneTopNSourceOperator; import org.elasticsearch.compute.operator.SourceOperator; import org.elasticsearch.core.IOUtils; +import org.elasticsearch.index.cache.query.TrivialQueryCachingPolicy; import org.elasticsearch.index.mapper.MapperServiceTestCase; +import org.elasticsearch.search.internal.ContextIndexSearcher; import org.elasticsearch.search.internal.SearchContext; import org.elasticsearch.test.TestSearchContext; import org.elasticsearch.xpack.esql.plan.physical.EsQueryExec; @@ -46,6 +48,7 @@ import java.util.Map; import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.lessThanOrEqualTo; public class LocalExecutionPlannerTests extends MapperServiceTestCase { @ParametersFactory @@ -76,8 +79,9 @@ public void testLuceneSourceOperatorHugeRowSize() throws IOException { LocalExecutionPlanner.LocalExecutionPlan plan = planner().plan( new EsQueryExec(Source.EMPTY, index(), List.of(), null, null, null, estimatedRowSize) ); + assertThat(plan.driverFactories.size(), lessThanOrEqualTo(pragmas.taskConcurrency())); LocalExecutionPlanner.DriverSupplier supplier = plan.driverFactories.get(0).driverSupplier(); - var factory = (LuceneSourceOperator.LuceneSourceOperatorFactory) supplier.physicalOperation().sourceOperatorFactory; + var factory = (LuceneSourceOperator.Factory) supplier.physicalOperation().sourceOperatorFactory; assertThat(factory.maxPageSize(), maxPageSizeMatcher(estimatedRowSizeIsHuge, estimatedRowSize)); assertThat(factory.limit(), 
equalTo(Integer.MAX_VALUE)); } @@ -90,8 +94,9 @@ public void testLuceneTopNSourceOperator() throws IOException { LocalExecutionPlanner.LocalExecutionPlan plan = planner().plan( new EsQueryExec(Source.EMPTY, index(), List.of(), null, limit, List.of(sort), estimatedRowSize) ); + assertThat(plan.driverFactories.size(), lessThanOrEqualTo(pragmas.taskConcurrency())); LocalExecutionPlanner.DriverSupplier supplier = plan.driverFactories.get(0).driverSupplier(); - var factory = (LuceneTopNSourceOperator.LuceneTopNSourceOperatorFactory) supplier.physicalOperation().sourceOperatorFactory; + var factory = (LuceneTopNSourceOperator.Factory) supplier.physicalOperation().sourceOperatorFactory; assertThat(factory.maxPageSize(), maxPageSizeMatcher(estimatedRowSizeIsHuge, estimatedRowSize)); assertThat(factory.limit(), equalTo(10)); } @@ -133,11 +138,21 @@ private EsqlConfiguration config() { } private EsPhysicalOperationProviders esPhysicalOperationProviders() throws IOException { - return new EsPhysicalOperationProviders(List.of(searchContext())); - } - - private SearchContext searchContext() throws IOException { - return new TestSearchContext(createSearchExecutionContext(createMapperService(mapping(b -> {})), new IndexSearcher(reader()))); + int numShards = randomIntBetween(1, 1000); + List<SearchContext> searchContexts = new ArrayList<>(numShards); + var searcher = new ContextIndexSearcher( + reader(), + IndexSearcher.getDefaultSimilarity(), + IndexSearcher.getDefaultQueryCache(), + TrivialQueryCachingPolicy.NEVER, + true + ); + for (int i = 0; i < numShards; i++) { + searchContexts.add( + new TestSearchContext(createSearchExecutionContext(createMapperService(mapping(b -> {})), searcher), null, searcher) + ); + } + return new EsPhysicalOperationProviders(searchContexts); } private IndexReader reader() { diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java index c088cae6f20c9..df163250e6e7a 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/planner/TestPhysicalOperationProviders.java @@ -52,7 +52,7 @@ public PhysicalOperation fieldExtractPhysicalOperation(FieldExtractExec fieldExt Layout.Builder layout = source.layout.builder(); PhysicalOperation op = source; for (Attribute attr : fieldExtractExec.attributesToExtract()) { - layout.appendChannel(attr.id()); + layout.append(attr); op = op.with(new TestFieldExtractOperatorFactory(attr.name()), layout.build()); } return op; @@ -61,9 +61,7 @@ public PhysicalOperation fieldExtractPhysicalOperation(FieldExtractExec fieldExt @Override public PhysicalOperation sourcePhysicalOperation(EsQueryExec esQueryExec, LocalExecutionPlannerContext context) { Layout.Builder layout = new Layout.Builder(); - for (int i = 0; i < esQueryExec.output().size(); i++) { - layout.appendChannel(esQueryExec.output().get(i).id()); - } + layout.append(esQueryExec.output()); return PhysicalOperation.fromSource(new TestSourceOperatorFactory(), layout.build()); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java new file mode 100644 index 0000000000000..bbd6906221aa5 --- /dev/null +++ 
b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/session/IndexResolverFieldNamesTests.java @@ -0,0 +1,1115 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.esql.session; + +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.xpack.esql.parser.EsqlParser; + +import java.util.Set; + +import static org.elasticsearch.xpack.ql.index.IndexResolver.ALL_FIELDS; +import static org.hamcrest.Matchers.equalTo; + +public class IndexResolverFieldNamesTests extends ESTestCase { + + private static final EsqlParser parser = new EsqlParser(); + + public void testBasicFromCommand() { + assertFieldNames("from test", ALL_FIELDS); + } + + public void testBasicFromCommandWithMetadata() { + assertFieldNames("from test [metadata _index, _id, _version]", ALL_FIELDS); + } + + public void testSimple1() { + assertFieldNames( + "from employees | sort emp_no | keep emp_no, still_hired | limit 3", + Set.of("emp_no", "emp_no.*", "still_hired", "still_hired.*") + ); + } + + public void testDirectFilter() { + assertFieldNames( + "from employees | sort emp_no | where still_hired | keep emp_no | limit 3", + Set.of("emp_no", "emp_no.*", "still_hired", "still_hired.*") + ); + } + + public void testSort1() { + assertFieldNames( + "from employees | sort still_hired, emp_no | keep emp_no, still_hired | limit 3", + Set.of("emp_no", "emp_no.*", "still_hired", "still_hired.*") + ); + } + + public void testStatsBy() { + assertFieldNames( + "from employees | stats avg(salary) by still_hired | sort still_hired", + Set.of("salary", "salary.*", "still_hired", "still_hired.*") + ); + } + + public void testStatsByAlwaysTrue() { + assertFieldNames( + "from employees | where first_name is not null | eval always_true = starts_with(first_name, \"\") " + + "| stats avg(salary) by always_true", + Set.of("first_name", "first_name.*", "salary", "salary.*") + ); + } + + public void testStatsByAlwaysFalse() { + assertFieldNames( + "from employees | where first_name is not null " + + "| eval always_false = starts_with(first_name, \"nonestartwiththis\") " + + "| stats avg(salary) by always_false", + Set.of("first_name", "first_name.*", "salary", "salary.*") + ); + } + + public void testIn1() { + assertFieldNames( + "from employees | keep emp_no, is_rehired, still_hired " + + "| where is_rehired in (still_hired, true) | where is_rehired != still_hired", + Set.of("emp_no", "emp_no.*", "is_rehired", "is_rehired.*", "still_hired", "still_hired.*") + ); + } + + public void testConvertFromString1() { + assertFieldNames(""" + from employees + | keep emp_no, is_rehired, first_name + | eval rehired_str = to_string(is_rehired) + | eval rehired_bool = to_boolean(rehired_str) + | eval all_false = to_boolean(first_name) + | drop first_name + | limit 5""", Set.of("emp_no", "emp_no.*", "is_rehired", "is_rehired.*", "first_name", "first_name.*")); + } + + public void testConvertFromDouble1() { + assertFieldNames(""" + from employees + | eval h_2 = height - 2.0, double2bool = to_boolean(h_2) + | where emp_no in (10036, 10037, 10038) + | keep emp_no, height, *2bool""", Set.of("height", "height.*", "emp_no", "emp_no.*", "h_2", "h_2.*", "*2bool.*", "*2bool")); + // TODO asking for more shouldn't hurt. Can we do better? 
("h_2" shouldn't be in the list of fields) + // Set.of("height", "height.*", "emp_no", "emp_no.*", "*2bool.*", "*2bool")); + } + + public void testConvertFromIntAndLong() { + assertFieldNames( + "from employees | keep emp_no, salary_change*" + + "| eval int2bool = to_boolean(salary_change.int), long2bool = to_boolean(salary_change.long) | limit 10", + Set.of( + "emp_no", + "emp_no.*", + "salary_change*", + "salary_change.int.*", + "salary_change.int", + "salary_change.long.*", + "salary_change.long" + ) + ); + } + + public void testIntToInt() { + assertFieldNames(""" + from employees + | where emp_no < 10002 + | keep emp_no""", Set.of("emp_no", "emp_no.*")); + } + + public void testLongToLong() { + assertFieldNames( + """ + from employees + | where languages.long < avg_worked_seconds + | limit 1 + | keep emp_no""", + Set.of("emp_no", "emp_no.*", "languages.long", "languages.long.*", "avg_worked_seconds", "avg_worked_seconds.*") + ); + } + + public void testDateToDate() { + assertFieldNames(""" + from employees + | where birth_date < hire_date + | keep emp_no + | sort emp_no + | limit 1""", Set.of("birth_date", "birth_date.*", "emp_no", "emp_no.*", "hire_date", "hire_date.*")); + } + + public void testTwoConditionsWithDefault() { + assertFieldNames(""" + from employees + | eval type = case(languages <= 1, "monolingual", languages <= 2, "bilingual", "polyglot") + | keep emp_no, type + | limit 10""", Set.of("emp_no", "emp_no.*", "languages", "languages.*")); + } + + public void testSingleCondition() { + assertFieldNames(""" + from employees + | eval g = case(gender == "F", true) + | keep gender, g + | limit 10""", Set.of("gender", "gender.*")); + } + + public void testConditionIsNull() { + assertFieldNames(""" + from employees + | eval g = case(gender == "F", 1, languages > 1, 2, 3) + | keep gender, languages, g + | limit 25""", Set.of("gender", "gender.*", "languages", "languages.*")); + } + + public void testEvalAssign() { + assertFieldNames( + "from employees | sort hire_date | eval x = hire_date | keep emp_no, x | limit 5", + Set.of("hire_date", "hire_date.*", "emp_no", "emp_no.*") + ); + } + + public void testMinMax() { + assertFieldNames("from employees | stats min = min(hire_date), max = max(hire_date)", Set.of("hire_date", "hire_date.*")); + } + + public void testEvalDateTruncIntervalExpressionPeriod() { + assertFieldNames( + "from employees | sort hire_date | eval x = date_trunc(hire_date, 1 month) | keep emp_no, hire_date, x | limit 5", + Set.of("hire_date", "hire_date.*", "emp_no", "emp_no.*") + ); + } + + public void testEvalDateTruncGrouping() { + assertFieldNames(""" + from employees + | eval y = date_trunc(hire_date, 1 year) + | stats count(emp_no) by y + | sort y + | keep y, count(emp_no) + | limit 5""", Set.of("hire_date", "hire_date.*", "emp_no", "emp_no.*")); + } + + public void testIn2() { + assertFieldNames(""" + from employees + | eval x = date_trunc(hire_date, 1 year) + | where birth_date not in (x, hire_date) + | keep x, hire_date + | sort x desc + | limit 4""", Set.of("hire_date", "hire_date.*", "birth_date", "birth_date.*")); + } + + public void testAutoBucketMonth() { + assertFieldNames(""" + from employees + | where hire_date >= "1985-01-01T00:00:00Z" and hire_date < "1986-01-01T00:00:00Z" + | eval hd = auto_bucket(hire_date, 20, "1985-01-01T00:00:00Z", "1986-01-01T00:00:00Z") + | sort hire_date + | keep hire_date, hd""", Set.of("hire_date", "hire_date.*")); + } + + public void testBorn_before_today() { + assertFieldNames( + "from employees | where birth_date < 
now() | sort emp_no asc | keep emp_no, birth_date| limit 1", + Set.of("birth_date", "birth_date.*", "emp_no", "emp_no.*") + ); + } + + public void testAutoBucketMonthInAgg() { + assertFieldNames(""" + FROM employees + | WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" + | EVAL bucket = AUTO_BUCKET(hire_date, 20, "1985-01-01T00:00:00Z", "1986-01-01T00:00:00Z") + | STATS AVG(salary) BY bucket + | SORT bucket""", Set.of("salary", "salary.*", "hire_date", "hire_date.*")); + } + + public void testEvalDateParseDynamic() { + assertFieldNames(""" + from employees + | where emp_no == 10039 or emp_no == 10040 + | sort emp_no + | eval birth_date_string = date_format(birth_date, "yyyy-MM-dd") + | eval new_date = date_parse(birth_date_string, "yyyy-MM-dd") + | eval bool = new_date == birth_date + | keep emp_no, new_date, birth_date, bool""", Set.of("emp_no", "emp_no.*", "birth_date", "birth_date.*")); + } + + public void testDateFields() { + assertFieldNames(""" + from employees + | where emp_no == 10049 or emp_no == 10050 + | eval year = date_extract(birth_date, "year"), month = date_extract(birth_date, "month_of_year") + | keep emp_no, year, month""", Set.of("emp_no", "emp_no.*", "birth_date", "birth_date.*")); + } + + public void testEvalDissect() { + assertFieldNames(""" + from employees + | eval full_name = concat(first_name, " ", last_name) + | dissect full_name "%{a} %{b}" + | sort emp_no asc + | keep full_name, a, b + | limit 3""", Set.of("first_name", "first_name.*", "last_name", "last_name.*", "emp_no", "emp_no.*")); + } + + public void testDissectExpression() { + assertFieldNames(""" + from employees + | dissect concat(first_name, " ", last_name) "%{a} %{b}" + | sort emp_no asc + | keep a, b + | limit 3""", Set.of("first_name", "first_name.*", "last_name", "last_name.*", "emp_no", "emp_no.*")); + } + + public void testMultivalueInput1() { + assertFieldNames(""" + from employees + | where emp_no <= 10006 + | dissect job_positions "%{a} %{b} %{c}" + | sort emp_no + | keep emp_no, a, b, c""", Set.of("emp_no", "emp_no.*", "job_positions", "job_positions.*")); + } + + public void testDocsDropHeight() { + assertFieldNames(""" + FROM employees + | DROP height + | LIMIT 0""", Set.of("*")); + } + + public void testDocsDropHeightWithWildcard() { + assertFieldNames(""" + FROM employees + | DROP height* + | LIMIT 0""", Set.of("*")); + } + + public void testDocsEval() { + assertFieldNames(""" + FROM employees + | KEEP first_name, last_name, height + | EVAL height_feet = height * 3.281, height_cm = height * 100 + | WHERE first_name == "Georgi" + | LIMIT 1""", Set.of("first_name", "first_name.*", "last_name", "last_name.*", "height", "height.*")); + } + + public void testDocsKeepWildcard() { + assertFieldNames(""" + FROM employees + | KEEP h* + | LIMIT 0""", Set.of("h*")); + } + + public void testDocsKeepDoubleWildcard() { + assertFieldNames(""" + FROM employees + | KEEP h*, * + | LIMIT 0""", Set.of("*")); + } + + public void testDocsRename() { + assertFieldNames(""" + FROM employees + | KEEP first_name, last_name, still_hired + | RENAME still_hired AS employed + | LIMIT 0""", Set.of("first_name", "first_name.*", "last_name", "last_name.*", "still_hired", "still_hired.*")); + } + + public void testDocsRenameMultipleColumns() { + assertFieldNames(""" + FROM employees + | KEEP first_name, last_name + | RENAME first_name AS fn, last_name AS ln + | LIMIT 0""", Set.of("first_name", "first_name.*", "last_name", "last_name.*")); + } + + public void testDocsStats() { + 
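+ // note: the grouping key counts as a used field too, so BY languages requires fetching "languages" alongside "emp_no"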
assertFieldNames(""" + FROM employees + | STATS count = COUNT(emp_no) BY languages + | SORT languages""", Set.of("emp_no", "emp_no.*", "languages", "languages.*")); + } + + public void testSortWithLimitOne_DropHeight() { + assertFieldNames("from employees | sort languages | limit 1 | drop height*", Set.of("*")); + } + + public void testDropAllColumns() { + assertFieldNames("from employees | keep height | drop height | eval x = 1", Set.of("height", "height.*")); + } + + public void testDropAllColumns_WithStats() { + assertFieldNames( + "from employees | keep height | drop height | eval x = 1 | stats c=count(x), mi=min(x), s=sum(x)", + Set.of("height", "height.*") + ); + } + + public void testEnrichOn() { + assertFieldNames(""" + from employees + | sort emp_no + | limit 1 + | eval x = to_string(languages) + | enrich languages_policy on x + | keep emp_no, language_name""", Set.of("emp_no", "emp_no.*", "languages", "languages.*", "language_name", "language_name.*")); + } + + public void testEnrichOn2() { + assertFieldNames(""" + from employees + | eval x = to_string(languages) + | enrich languages_policy on x + | keep emp_no, language_name + | sort emp_no + | limit 1""", Set.of("emp_no", "emp_no.*", "languages", "languages.*", "language_name", "language_name.*")); + } + + public void testUselessEnrich() { + assertFieldNames(""" + from employees + | eval x = "abc" + | enrich languages_policy on x + | limit 1""", Set.of("*")); + } + + public void testSimpleSortLimit() { + assertFieldNames(""" + from employees + | eval x = to_string(languages) + | enrich languages_policy on x + | keep emp_no, language_name + | sort emp_no + | limit 1""", Set.of("languages", "languages.*", "emp_no", "emp_no.*", "language_name", "language_name.*")); + } + + public void testWith() { + assertFieldNames( + """ + from employees | eval x = to_string(languages) | keep emp_no, x | sort emp_no | limit 1 + | enrich languages_policy on x with language_name""", + Set.of("emp_no", "emp_no.*", "languages", "languages.*", "language_name", "language_name.*") + ); + } + + public void testWithAlias() { + assertFieldNames( + """ + from employees | sort emp_no | limit 3 | eval x = to_string(languages) | keep emp_no, x + | enrich languages_policy on x with lang = language_name""", + Set.of("emp_no", "emp_no.*", "languages", "languages.*", "language_name", "language_name.*") + ); + } + + public void testWithAliasSort() { + assertFieldNames( + """ + from employees | eval x = to_string(languages) | keep emp_no, x | sort emp_no | limit 3 + | enrich languages_policy on x with lang = language_name""", + Set.of("emp_no", "emp_no.*", "languages", "languages.*", "language_name", "language_name.*") + ); + } + + public void testWithAliasAndPlain() { + assertFieldNames( + """ + from employees | sort emp_no desc | limit 3 | eval x = to_string(languages) | keep emp_no, x + | enrich languages_policy on x with lang = language_name, language_name""", + Set.of("emp_no", "emp_no.*", "languages", "languages.*", "language_name", "language_name.*") + ); + } + + public void testWithTwoAliasesSameProp() { + assertFieldNames( + """ + from employees | sort emp_no | limit 1 | eval x = to_string(languages) | keep emp_no, x + | enrich languages_policy on x with lang = language_name, lang2 = language_name""", + Set.of("emp_no", "emp_no.*", "languages", "languages.*", "language_name", "language_name.*") + ); + } + + public void testRedundantWith() { + assertFieldNames( + """ + from employees | sort emp_no | limit 1 | eval x = to_string(languages) | keep emp_no, x + 
| enrich languages_policy on x with language_name, language_name""", + Set.of("emp_no", "emp_no.*", "languages", "languages.*", "language_name", "language_name.*") + ); + } + + public void testNullInput() { + assertFieldNames( + """ + from employees + | where emp_no == 10017 + | keep emp_no, gender + | enrich languages_policy on gender with language_name, language_name""", + Set.of("gender", "gender.*", "emp_no", "emp_no.*", "language_name", "language_name.*") + ); + } + + public void testConstantNullInput() { + assertFieldNames( + """ + from employees + | where emp_no == 10020 + | eval x = to_string(languages) + | keep emp_no, x + | enrich languages_policy on x with language_name, language_name""", + Set.of("languages", "languages.*", "emp_no", "emp_no.*", "language_name", "language_name.*") + ); + } + + public void testEnrichEval() { + assertFieldNames(""" + from employees + | eval x = to_string(languages) + | enrich languages_policy on x with lang = language_name + | eval language = concat(x, "-", lang) + | keep emp_no, x, lang, language + | sort emp_no desc | limit 3""", Set.of("languages", "languages.*", "emp_no", "emp_no.*", "language_name", "language_name.*")); + } + + public void testSimple() { + assertFieldNames(""" + from employees + | eval x = 1, y = to_string(languages) + | enrich languages_policy on y + | where x > 1 + | keep emp_no, language_name + | limit 1""", Set.of("emp_no", "emp_no.*", "languages", "languages.*", "language_name", "language_name.*")); + } + + public void testEvalNullSort() { + assertFieldNames( + "from employees | eval x = null | sort x asc, emp_no desc | keep emp_no, x, last_name | limit 2", + Set.of("last_name", "last_name.*", "emp_no", "emp_no.*") + ); + } + + public void testFilterEvalFilter() { + assertFieldNames(""" + from employees + | where emp_no < 100010 + | eval name_len = length(first_name) + | where name_len < 4 + | keep first_name + | sort first_name""", Set.of("emp_no", "emp_no.*", "first_name", "first_name.*")); + } + + public void testEvalWithIsNullIsNotNull() { + assertFieldNames( + """ + from employees + | eval true_bool = null is null, false_bool = null is not null, negated_true = not(null is null) + | sort emp_no + | limit 1 + | keep *true*, *false*, first_name, last_name""", + Set.of("emp_no", "emp_no.*", "first_name", "first_name.*", "last_name", "last_name.*", "*true*", "*false*") + ); + } + + public void testInDouble() { + assertFieldNames( + "from employees | keep emp_no, height, height.float, height.half_float, height.scaled_float | where height in (2.03)", + Set.of( + "emp_no", + "emp_no.*", + "height", + "height.*", + "height.float", + "height.float.*", + "height.half_float", + "height.half_float.*", + "height.scaled_float", + "height.scaled_float.*" + ) + ); + } + + public void testConvertFromDatetime() { + assertFieldNames( + "from employees | sort emp_no | eval hire_double = to_double(hire_date) | keep emp_no, hire_date, hire_double | limit 3", + Set.of("emp_no", "emp_no.*", "hire_date", "hire_date.*") + ); + } + + public void testAutoBucket() { + assertFieldNames(""" + FROM employees + | WHERE hire_date >= "1985-01-01T00:00:00Z" AND hire_date < "1986-01-01T00:00:00Z" + | EVAL bh = auto_bucket(height, 20, 1.41, 2.10) + | SORT hire_date + | KEEP hire_date, height, bh""", Set.of("hire_date", "hire_date.*", "height", "height.*")); + } + + public void testEvalGrok() { + assertFieldNames(""" + from employees + | eval full_name = concat(first_name, " ", last_name) + | grok full_name "%{WORD:a} %{WORD:b}" + | sort emp_no asc + | 
keep full_name, a, b + | limit 3""", Set.of("first_name", "first_name.*", "last_name", "last_name.*", "emp_no", "emp_no.*")); + } + + public void testGrokExpression() { + assertFieldNames(""" + from employees + | grok concat(first_name, " ", last_name) "%{WORD:a} %{WORD:b}" + | sort emp_no asc + | keep a, b + | limit 3""", Set.of("first_name", "first_name.*", "last_name", "last_name.*", "emp_no", "emp_no.*")); + } + + public void testEvalGrokSort() { + assertFieldNames(""" + from employees + | eval full_name = concat(first_name, " ", last_name) + | grok full_name "%{WORD:a} %{WORD:b}" + | sort a asc + | keep full_name, a, b + | limit 3""", Set.of("first_name", "first_name.*", "last_name", "last_name.*")); + } + + public void testGrokStats() { + assertFieldNames(""" + from employees + | eval x = concat(gender, " foobar") + | grok x "%{WORD:a} %{WORD:b}" + | stats n = max(emp_no) by a + | keep a, n + | sort a asc""", Set.of("gender", "gender.*", "emp_no", "emp_no.*")); + } + + public void testNullOnePattern() { + assertFieldNames(""" + from employees + | where emp_no == 10030 + | grok first_name "%{WORD:a}" + | keep first_name, a""", Set.of("first_name", "first_name.*", "emp_no", "emp_no.*")); + } + + public void testMultivalueInput() { + assertFieldNames(""" + from employees + | where emp_no <= 10006 + | grok job_positions "%{WORD:a} %{WORD:b} %{WORD:c}" + | sort emp_no + | keep emp_no, a, b, c, job_positions""", Set.of("job_positions", "job_positions.*", "emp_no", "emp_no.*")); + } + + public void testSelectAll() { + assertFieldNames("FROM apps [metadata _id]", Set.of("*")); + } + + public void testFilterById() { + assertFieldNames("FROM apps [metadata _id]| WHERE _id == \"4\"", Set.of("*")); + } + + public void testKeepId() { + assertFieldNames("FROM apps [metadata _id] | WHERE id == 3 | KEEP _id", Set.of("id", "id.*")); + } + + public void testIdRangeAndSort() { + assertFieldNames(""" + FROM apps [metadata _id] + | WHERE _id >= "2" AND _id <= "7" + | SORT _id + | keep id, name, _id""", Set.of("id", "id.*", "name", "name.*")); + } + + public void testOrderById() { + assertFieldNames("FROM apps [metadata _id] | KEEP _id, name | SORT _id", Set.of("name", "name.*")); + } + + public void testOrderByIdDesc() { + assertFieldNames("FROM apps [metadata _id] | KEEP _id, name | SORT _id DESC", Set.of("name", "name.*")); + } + + public void testConcatId() { + assertFieldNames("FROM apps [metadata _id] | eval c = concat(_id, name) | SORT _id | KEEP c", Set.of("name", "name.*")); + } + + public void testStatsOnId() { + assertFieldNames("FROM apps [metadata _id] | stats c = count(_id), d = count_distinct(_id)", Set.of("*")); + } + + public void testStatsOnIdByGroup() { + assertFieldNames("FROM apps [metadata _id] | stats c = count(_id) by name | sort c desc, name | limit 5", Set.of("name", "name.*")); + } + + public void testSimpleProject() { + assertFieldNames( + "from hosts | keep card, host, ip0, ip1", + Set.of("card", "card.*", "host", "host.*", "ip0", "ip0.*", "ip1", "ip1.*") + ); + } + + public void testEquals() { + assertFieldNames( + "from hosts | sort host, card | where ip0 == ip1 | keep card, host", + Set.of("card", "card.*", "host", "host.*", "ip0", "ip0.*", "ip1", "ip1.*") + ); + } + + public void testConditional() { + assertFieldNames("from hosts | eval eq=case(ip0==ip1, ip0, ip1) | keep eq, ip0, ip1", Set.of("ip1", "ip1.*", "ip0", "ip0.*")); + } + + public void testWhereWithAverageBySubField() { + assertFieldNames( + "from employees | where languages + 1 == 6 | stats 
avg(avg_worked_seconds) by languages.long", + Set.of("languages", "languages.*", "avg_worked_seconds", "avg_worked_seconds.*", "languages.long", "languages.long.*") + ); + } + + public void testAverageOfEvalValue() { + assertFieldNames( + "from employees | eval ratio = salary / height | stats avg(ratio)", + Set.of("salary", "salary.*", "height", "height.*") + ); + } + + public void testTopNProjectEvalProject() { + assertFieldNames( + "from employees | sort salary | limit 1 | keep languages, salary | eval x = languages + 1 | keep x", + Set.of("salary", "salary.*", "languages", "languages.*") + ); + } + + public void testMvSum() { + assertFieldNames(""" + from employees + | where emp_no > 10008 + | eval salary_change = mv_sum(salary_change.int) + | sort emp_no + | keep emp_no, salary_change.int, salary_change + | limit 7""", Set.of("emp_no", "emp_no.*", "salary_change.int", "salary_change.int.*")); + } + + public void testMetaIndexAliasedInAggs() { + assertFieldNames( + "from employees [metadata _index] | eval _i = _index | stats max = max(emp_no) by _i", + Set.of("emp_no", "emp_no.*") + ); + } + + public void testCoalesceFolding() { + assertFieldNames(""" + FROM employees + | EVAL foo=COALESCE(true, false, null) + | SORT emp_no ASC + | KEEP emp_no, first_name, foo + | limit 3""", Set.of("emp_no", "emp_no.*", "first_name", "first_name.*")); + } + + public void testRenameEvalProject() { + assertFieldNames( + "from employees | rename languages as x | keep x | eval z = 2 * x | keep x, z | limit 3", + Set.of("languages", "languages.*") + ); + } + + public void testRenameProjectEval() { + assertFieldNames(""" + from employees + | eval y = languages + | rename languages as x + | keep x, y + | eval x2 = x + 1 + | eval y2 = y + 2 + | limit 3""", Set.of("languages", "languages.*")); + } + + public void testRenameWithFilterPushedToES() { + assertFieldNames( + "from employees | rename emp_no as x | keep languages, first_name, last_name, x | where x > 10030 and x < 10040 | limit 5", + Set.of("emp_no", "emp_no.*", "languages", "languages.*", "first_name", "first_name.*", "last_name", "last_name.*") + ); + } + + public void testRenameOverride() { + assertFieldNames( + "from employees | rename emp_no as languages | keep languages, last_name | limit 3", + Set.of("emp_no", "emp_no.*", "last_name", "last_name.*") + ); + } + + public void testProjectRenameDate() { + assertFieldNames( + "from employees | sort hire_date | rename hire_date as x | keep emp_no, x | limit 5", + Set.of("hire_date", "hire_date.*", "emp_no", "emp_no.*") + ); + } + + public void testRenameDrop() { + assertFieldNames(""" + from employees + | sort hire_date + | rename hire_date as x, emp_no as y + | drop first_name, last_name, gender, birth_date, salary, languages*, height*, still_hired, avg_worked_seconds, + job_positions, is_rehired, salary_change* + | limit 5""", Set.of("*")); + } + + public void testMaxOfLong() { + assertFieldNames("from employees | stats l = max(languages.long)", Set.of("languages.long", "languages.long.*")); + } + + public void testGroupByAlias() { + assertFieldNames( + "from employees | rename languages as l | keep l, height | stats m = min(height) by l | sort l", + Set.of("languages", "languages.*", "height", "height.*") + ); + } + + public void testByStringAndLong() { + assertFieldNames(""" + from employees + | eval trunk_worked_seconds = avg_worked_seconds / 100000000 * 100000000 + | stats c = count(gender) by gender, trunk_worked_seconds + | sort c desc""", Set.of("avg_worked_seconds", "avg_worked_seconds.*", 
"gender", "gender.*")); + } + + public void testByStringAndLongWithAlias() { + assertFieldNames(""" + from employees + | eval trunk_worked_seconds = avg_worked_seconds / 100000000 * 100000000 + | rename gender as g, trunk_worked_seconds as tws + | keep g, tws + | stats c = count(g) by g, tws + | sort c desc""", Set.of("avg_worked_seconds", "avg_worked_seconds.*", "gender", "gender.*")); + } + + public void testByStringAndString() { + assertFieldNames(""" + from employees + | eval hire_year_str = date_format(hire_date, "yyyy") + | stats c = count(gender) by gender, hire_year_str + | sort c desc, gender, hire_year_str + | where c >= 5""", Set.of("hire_date", "hire_date.*", "gender", "gender.*")); + } + + public void testByLongAndLong() { + assertFieldNames(""" + from employees + | eval trunk_worked_seconds = avg_worked_seconds / 100000000 * 100000000 + | stats c = count(languages.long) by languages.long, trunk_worked_seconds + | sort c desc""", Set.of("avg_worked_seconds", "avg_worked_seconds.*", "languages.long", "languages.long.*")); + } + + public void testByDateAndKeywordAndIntWithAlias() { + assertFieldNames(""" + from employees + | eval d = date_trunc(hire_date, 1 year) + | rename gender as g, languages as l, emp_no as e + | keep d, g, l, e + | stats c = count(e) by d, g, l + | sort c desc, d, l desc + | limit 10""", Set.of("hire_date", "hire_date.*", "gender", "gender.*", "languages", "languages.*", "emp_no", "emp_no.*")); + } + + public void testCountDistinctOfKeywords() { + assertFieldNames( + """ + from employees + | eval hire_year_str = date_format(hire_date, "yyyy") + | stats g = count_distinct(gender), h = count_distinct(hire_year_str)""", + Set.of("hire_date", "hire_date.*", "gender", "gender.*") + ); + } + + public void testCountDistinctOfIpPrecision() { + assertFieldNames(""" + FROM hosts + | STATS COUNT_DISTINCT(ip0, 80000), COUNT_DISTINCT(ip1, 5)""", Set.of("ip0", "ip0.*", "ip1", "ip1.*")); + } + + public void testPercentileOfLong() { + assertFieldNames( + """ + from employees + | stats p0 = percentile(salary_change.long, 0), p50 = percentile(salary_change.long, 50)""", + Set.of("salary_change.long", "salary_change.long.*") + ); + } + + public void testMedianOfInteger() { + assertFieldNames(""" + FROM employees + | STATS MEDIAN(salary), PERCENTILE(salary, 50)""", Set.of("salary", "salary.*")); + } + + public void testMedianAbsoluteDeviation() { + assertFieldNames(""" + FROM employees + | STATS MEDIAN(salary), MEDIAN_ABSOLUTE_DEVIATION(salary)""", Set.of("salary", "salary.*")); + } + + public void testIn3VLWithComputedNull() { + assertFieldNames( + """ + from employees + | where mv_count(job_positions) <= 1 + | where emp_no >= 10024 + | limit 3 + | keep emp_no, job_positions + | eval nil = concat("", null) + | eval is_in = job_positions in ("Accountant", "Internship", nil)""", + Set.of("job_positions", "job_positions.*", "emp_no", "emp_no.*") + ); + } + + public void testCase() { + assertFieldNames(""" + FROM apps + | EVAL version_text = TO_STR(version) + | WHERE version IS NULL OR version_text LIKE "1*" + | EVAL v = TO_VER(CONCAT("123", TO_STR(version))) + | EVAL m = CASE(version > TO_VER("1.1"), 1, 0) + | EVAL g = CASE(version > TO_VER("1.3.0"), version, TO_VER("1.3.0")) + | EVAL i = CASE(version IS NULL, TO_VER("0.1"), version) + | EVAL c = CASE( + version > TO_VER("1.1"), "high", + version IS NULL, "none", + "low") + | SORT version DESC NULLS LAST, id DESC + | KEEP v, version, version_text, id, m, g, i, c""", Set.of("version", "version.*", "id", "id.*")); + } + + public 
void testLikePrefix() { + assertFieldNames(""" + from employees + | where first_name like "Eberhar*" + | keep emp_no, first_name""", Set.of("emp_no", "emp_no.*", "first_name", "first_name.*")); + } + + public void testRLikePrefix() { + assertFieldNames(""" + from employees + | where first_name rlike "Aleja.*" + | keep emp_no""", Set.of("first_name", "first_name.*", "emp_no", "emp_no.*")); + } + + public void testByUnmentionedLongAndLong() { + assertFieldNames( + """ + from employees + | eval trunk_worked_seconds = avg_worked_seconds / 100000000 * 100000000 + | stats c = count(gender) by languages.long, trunk_worked_seconds + | sort c desc""", + Set.of("avg_worked_seconds", "avg_worked_seconds.*", "languages.long", "languages.long.*", "gender", "gender.*") + ); + } + + public void testRenameNopProject() { + assertFieldNames(""" + from employees + | rename emp_no as emp_no + | keep emp_no, last_name + | limit 3""", Set.of("emp_no", "emp_no.*", "last_name", "last_name.*")); + } + + public void testRename() { + assertFieldNames(""" + from test + | rename emp_no as e + | keep first_name, e + """, Set.of("emp_no", "emp_no.*", "first_name", "first_name.*")); + } + + public void testChainedRename() { + assertFieldNames(""" + from test + | rename emp_no as r1, r1 as r2, r2 as r3 + | keep first_name, r3 + """, Set.of("emp_no", "emp_no.*", "first_name", "first_name.*", "r1", "r1.*", "r2", "r2.*"));// TODO asking for more shouldn't + // hurt. Can we do better? + // Set.of("emp_no", "emp_no.*", "first_name", "first_name.*")); + } + + public void testChainedRenameReuse() { + assertFieldNames(""" + from test + | rename emp_no as r1, r1 as r2, r2 as r3, first_name as r1 + | keep r1, r3 + """, Set.of("emp_no", "emp_no.*", "first_name", "first_name.*", "r1", "r1.*", "r2", "r2.*"));// TODO asking for more shouldn't + // hurt. Can we do better? + // Set.of("emp_no", "emp_no.*", "first_name", "first_name.*")); + } + + public void testRenameBackAndForth() { + assertFieldNames(""" + from test + | rename emp_no as r1, r1 as emp_no + | keep emp_no + """, Set.of("emp_no", "emp_no.*", "r1", "r1.*"));// TODO asking for more shouldn't hurt. Can we do better? 
+ // Set.of("emp_no", "emp_no.*")); + } + + public void testRenameReuseAlias() { + assertFieldNames(""" + from test + | rename emp_no as e, first_name as e + """, Set.of("*")); + } + + public void testIfDuplicateNamesGroupingHasPriority() { + assertFieldNames( + "from employees | stats languages = avg(height), languages = min(height) by languages | sort languages", + Set.of("height", "height.*", "languages", "languages.*") + ); + } + + public void testCoalesce() { + assertFieldNames(""" + FROM employees + | EVAL first_name = COALESCE(first_name, "X") + | SORT first_name DESC, emp_no ASC + | KEEP emp_no, first_name + | limit 10""", Set.of("first_name", "first_name.*", "emp_no", "emp_no.*")); + } + + public void testCoalesceBackwards() { + assertFieldNames(""" + FROM employees + | EVAL first_name = COALESCE("X", first_name) + | SORT first_name DESC, emp_no ASC + | KEEP emp_no, first_name + | limit 10""", Set.of("first_name", "first_name.*", "emp_no", "emp_no.*")); + } + + public void testGroupByVersionCast() { + assertFieldNames(""" + FROM apps + | EVAL g = TO_VER(CONCAT("1.", TO_STR(version))) + | STATS id = MAX(id) BY g + | SORT id + | DROP g""", Set.of("version", "version.*", "id", "id.*")); + } + + public void testCoalesceEndsInNull() { + assertFieldNames(""" + FROM employees + | EVAL first_name = COALESCE(first_name, last_name, null) + | SORT first_name DESC, emp_no ASC + | KEEP emp_no, first_name + | limit 3""", Set.of("first_name", "first_name.*", "last_name", "last_name.*", "emp_no", "emp_no.*")); + } + + public void testMvAvg() { + assertFieldNames(""" + from employees + | where emp_no > 10008 + | eval salary_change = mv_avg(salary_change) + | sort emp_no + | keep emp_no, salary_change.int, salary_change + | limit 7""", Set.of("emp_no", "emp_no.*", "salary_change", "salary_change.*", "salary_change.int", "salary_change.int.*")); + } + + public void testEvalOverride() { + assertFieldNames(""" + from employees + | eval languages = languages + 1 + | eval languages = languages + 1 + | limit 5 + | keep l*""", Set.of("languages", "languages.*", "l*"));// subtlety here. 
Keeping only "languages*" can remove any other "l*" + // named fields + } + + public void testBasicWildcardKeep() { + assertFieldNames("from test | keep *", Set.of("*")); + } + + public void testBasicWildcardKeep2() { + assertFieldNames(""" + from test + | keep un* + """, Set.of("un*")); + } + + public void testWildcardKeep() { + assertFieldNames(""" + from test + | keep first_name, *, last_name + """, Set.of("*")); + } + + public void testProjectThenDropName() { + assertFieldNames(""" + from test + | keep *name + | drop first_name + """, Set.of("*name", "*name.*", "first_name", "first_name.*")); + } + + public void testProjectAfterDropName() { + assertFieldNames(""" + from test + | drop first_name + | keep *name + """, Set.of("*name.*", "*name", "first_name", "first_name.*")); + } + + public void testProjectKeepAndDropName() { + assertFieldNames(""" + from test + | drop first_name + | keep last_name + """, Set.of("last_name", "last_name.*", "first_name", "first_name.*")); + } + + public void testProjectDropPattern() { + assertFieldNames(""" + from test + | keep * + | drop *_name + """, Set.of("*")); + } + + public void testProjectDropNoStarPattern() { + assertFieldNames(""" + from test + | drop *_name + """, Set.of("*")); + } + + public void testProjectOrderPatternWithRest() { + assertFieldNames(""" + from test + | keep *name, *, emp_no + """, Set.of("*")); + } + + public void testProjectDropPatternAndKeepOthers() { + assertFieldNames(""" + from test + | drop l* + | keep first_name, salary + """, Set.of("l*", "first_name", "first_name.*", "salary", "salary.*")); + } + + public void testAliasesThatGetDropped() { + assertFieldNames(""" + from test + | eval x = languages + 1 + | where first_name like "%A" + | eval first_name = concat(first_name, "xyz") + | drop first_name + """, Set.of("*")); + } + + private void assertFieldNames(String query, Set<String> expected) { + Set<String> fieldNames = EsqlSession.fieldNames(parser.createStatement(query)); + assertThat(fieldNames, equalTo(expected)); + } +} diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/privileges/ApplicationActionsResolver.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/privileges/ApplicationActionsResolver.java index e9b1ca3833ea1..9d5e7cae6ad31 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/privileges/ApplicationActionsResolver.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/privileges/ApplicationActionsResolver.java @@ -33,7 +33,6 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.security.action.privilege.GetPrivilegesAction; import org.elasticsearch.xpack.core.security.action.privilege.GetPrivilegesRequest; @@ -83,7 +82,8 @@ public ApplicationActionsResolver(Settings settings, ServiceProviderDefaults def // Preload the cache at 2/3 of its expiry time (TTL). This means that we should never have an empty cache, but if for some reason // the preload thread stops running, we will still automatically refresh the cache on access.
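// (e.g. with a 15 minute TTL the preload runs every 10 minutes)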
final TimeValue preloadInterval = TimeValue.timeValueMillis(cacheTtl.millis() * 2 / 3); - client.threadPool().scheduleWithFixedDelay(this::loadPrivilegesForDefaultApplication, preloadInterval, ThreadPool.Names.GENERIC); + client.threadPool() + .scheduleWithFixedDelay(this::loadPrivilegesForDefaultApplication, preloadInterval, client.threadPool().generic()); } public static Collection<Setting<?>> getSettings() { diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/idp/SamlIdentityProviderBuilder.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/idp/SamlIdentityProviderBuilder.java index 885e77d3eeec0..8051b08c78a86 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/idp/SamlIdentityProviderBuilder.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/idp/SamlIdentityProviderBuilder.java @@ -168,6 +168,10 @@ public SamlIdentityProvider build() throws ValidationException { ex.addValidationError("Service provider defaults must be specified"); } + + if (allowedNameIdFormats == null || allowedNameIdFormats.isEmpty()) { + ex.addValidationError("At least 1 allowed NameID format must be specified"); + } + if (ex.validationErrors().isEmpty() == false) { throw ex; } @@ -260,6 +264,9 @@ public SamlIdentityProviderBuilder singleLogoutEndpoint(String binding, URL endp } public SamlIdentityProviderBuilder allowedNameIdFormat(String nameIdFormat) { + if (this.allowedNameIdFormats == null) { + this.allowedNameIdFormats = new HashSet<>(); + } this.allowedNameIdFormats.add(nameIdFormat); return this; } diff --git a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderDocument.java b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderDocument.java index 3562ba866c049..c16f27229c5a2 100644 --- a/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderDocument.java +++ b/x-pack/plugin/identity-provider/src/main/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderDocument.java @@ -256,16 +256,16 @@ public SamlServiceProviderDocument(StreamInput in) throws IOException { authenticationExpiryMillis = in.readOptionalVLong(); privileges.resource = in.readString(); - privileges.rolePatterns = new TreeSet<>(in.readSet(StreamInput::readString)); + privileges.rolePatterns = new TreeSet<>(in.readCollectionAsSet(StreamInput::readString)); attributeNames.principal = in.readString(); attributeNames.email = in.readOptionalString(); attributeNames.name = in.readOptionalString(); attributeNames.roles = in.readOptionalString(); - certificates.serviceProviderSigning = in.readStringList(); - certificates.identityProviderSigning = in.readStringList(); - certificates.identityProviderMetadataSigning = in.readStringList(); + certificates.serviceProviderSigning = in.readStringCollectionAsList(); + certificates.identityProviderSigning = in.readStringCollectionAsList(); + certificates.identityProviderMetadataSigning = in.readStringCollectionAsList(); } @Override diff --git a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/DeleteSamlServiceProviderRequestTests.java b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/DeleteSamlServiceProviderRequestTests.java index 7634dab750b8b..4beda3cc18792 100644 --- 
a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/DeleteSamlServiceProviderRequestTests.java +++ b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/DeleteSamlServiceProviderRequestTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.idp.action; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.support.WriteRequest; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.test.TransportVersionUtils; @@ -28,7 +29,7 @@ public void testSerialization() throws IOException { ); final TransportVersion version = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_7_7_0, + TransportVersions.V_7_7_0, TransportVersion.current() ); final DeleteSamlServiceProviderRequest read = copyWriteable( diff --git a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/PutSamlServiceProviderRequestTests.java b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/PutSamlServiceProviderRequestTests.java index 50556a26f5260..60675b9355973 100644 --- a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/PutSamlServiceProviderRequestTests.java +++ b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/action/PutSamlServiceProviderRequestTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.support.WriteRequest.RefreshPolicy; import org.elasticsearch.common.Strings; @@ -86,7 +87,7 @@ public void testSerialization() throws IOException { final PutSamlServiceProviderRequest request = new PutSamlServiceProviderRequest(doc, RefreshPolicy.NONE); final TransportVersion version = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_7_7_0, + TransportVersions.V_7_7_0, TransportVersion.current() ); final PutSamlServiceProviderRequest read = copyWriteable( diff --git a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/idp/SamlIdentityProviderBuilderTests.java b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/idp/SamlIdentityProviderBuilderTests.java index 1ca0a955dd08a..e5f995204ac0c 100644 --- a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/idp/SamlIdentityProviderBuilderTests.java +++ b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/idp/SamlIdentityProviderBuilderTests.java @@ -21,8 +21,10 @@ import org.elasticsearch.xpack.idp.saml.test.IdpSamlTestCase; import org.hamcrest.Matchers; import org.mockito.Mockito; +import org.opensaml.saml.saml2.core.NameID; import org.opensaml.security.x509.X509Credential; +import java.net.URL; import java.nio.file.Files; import java.nio.file.Path; import java.security.PrivateKey; @@ -43,13 +45,16 @@ import static org.elasticsearch.xpack.idp.saml.idp.SamlIdentityProviderBuilder.IDP_SLO_REDIRECT_ENDPOINT; import static org.elasticsearch.xpack.idp.saml.idp.SamlIdentityProviderBuilder.IDP_SSO_POST_ENDPOINT; import static org.elasticsearch.xpack.idp.saml.idp.SamlIdentityProviderBuilder.IDP_SSO_REDIRECT_ENDPOINT; +import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsString; import static 
org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.notNullValue; import static org.opensaml.saml.common.xml.SAMLConstants.SAML2_POST_BINDING_URI; import static org.opensaml.saml.common.xml.SAMLConstants.SAML2_REDIRECT_BINDING_URI; +import static org.opensaml.saml.saml2.core.NameIDType.EMAIL; import static org.opensaml.saml.saml2.core.NameIDType.PERSISTENT; import static org.opensaml.saml.saml2.core.NameIDType.TRANSIENT; @@ -592,4 +597,39 @@ public void testCreateMetadataSigningCredentialFromKeyStoreWithMultipleEntriesBu ); } + public void testCreateViaMethodCalls() throws Exception { + final String entityId = randomAlphaOfLength(4) + ":" + randomAlphaOfLength(6) + "/" + randomAlphaOfLengthBetween(4, 12); + final URL redirectUrl = new URL( + randomFrom("http", "https") + + "://" + + String.join(".", randomArray(2, 5, String[]::new, () -> randomAlphaOfLengthBetween(3, 6))) + + "/" + + String.join("/", randomArray(1, 3, String[]::new, () -> randomAlphaOfLengthBetween(2, 4))) + ); + + final X509Credential credential = readCredentials("RSA", randomFrom(1024, 2048)); + final String nameIdFormat = randomFrom(NameID.TRANSIENT, PERSISTENT, EMAIL); + + final SamlServiceProviderResolver serviceResolver = Mockito.mock(SamlServiceProviderResolver.class); + final WildcardServiceProviderResolver wildcardResolver = Mockito.mock(WildcardServiceProviderResolver.class); + final ServiceProviderDefaults spDefaults = new ServiceProviderDefaults( + randomAlphaOfLength(2), + nameIdFormat, + Duration.ofMinutes(randomIntBetween(1, 10)) + ); + final SamlIdentityProvider idp = SamlIdentityProvider.builder(serviceResolver, wildcardResolver) + .entityId(entityId) + .singleSignOnEndpoint(SAML2_REDIRECT_BINDING_URI, redirectUrl) + .signingCredential(credential) + .serviceProviderDefaults(spDefaults) + .allowedNameIdFormat(nameIdFormat) + .build(); + + assertThat(idp.getEntityId(), is(entityId)); + assertThat(idp.getSingleSignOnEndpoint(SAML2_REDIRECT_BINDING_URI), is(redirectUrl)); + assertThat(idp.getSigningCredential(), is(credential)); + assertThat(idp.getServiceProviderDefaults(), is(spDefaults)); + assertThat(idp.getAllowedNameIdFormats(), contains(nameIdFormat)); + } + } diff --git a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderDocumentTests.java b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderDocumentTests.java index a4fedb3b5b8d0..233702d7ddd9a 100644 --- a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderDocumentTests.java +++ b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/sp/SamlServiceProviderDocumentTests.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.idp.saml.sp; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ValidationException; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -161,7 +162,7 @@ private SamlServiceProviderDocument assertXContentRoundTrip(SamlServiceProviderD private SamlServiceProviderDocument assertSerializationRoundTrip(SamlServiceProviderDocument doc) throws IOException { final TransportVersion version = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_7_7_0, + 
TransportVersions.V_7_7_0, TransportVersion.current() ); final SamlServiceProviderDocument read = copyWriteable( diff --git a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/support/SamlAuthenticationStateTests.java b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/support/SamlAuthenticationStateTests.java index 35dd53211bbbc..934126e7f0fa0 100644 --- a/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/support/SamlAuthenticationStateTests.java +++ b/x-pack/plugin/identity-provider/src/test/java/org/elasticsearch/xpack/idp/saml/support/SamlAuthenticationStateTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.idp.saml.support; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.xcontent.XContentHelper; @@ -89,7 +90,7 @@ private SamlAuthenticationState assertXContentRoundTrip(SamlAuthenticationState private SamlAuthenticationState assertSerializationRoundTrip(SamlAuthenticationState state) throws IOException { final TransportVersion version = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_7_7_0, + TransportVersions.V_7_7_0, TransportVersion.current() ); final SamlAuthenticationState read = copyWriteable( diff --git a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java index 3b3279c353fba..f53b7d1d9af14 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/javaRestTest/java/org/elasticsearch/xpack/ilm/actions/DownsampleActionIT.java @@ -373,7 +373,8 @@ public void testDownsampleTwice() throws Exception { assertThat(indexExists(downsampleIndexName), is(false)); Map<String, Object> settings = getOnlyIndexSettings(client(), downsampleOfDownsampleIndexName); - assertEquals(firstBackingIndex, settings.get(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.getKey())); + assertEquals(firstBackingIndex, settings.get(IndexMetadata.INDEX_DOWNSAMPLE_ORIGIN_NAME.getKey())); + assertEquals(downsampleIndexName, settings.get(IndexMetadata.INDEX_DOWNSAMPLE_SOURCE_NAME.getKey())); assertEquals(DownsampleTaskStatus.SUCCESS.toString(), settings.get(IndexMetadata.INDEX_DOWNSAMPLE_STATUS.getKey())); assertEquals(policy, settings.get(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey())); }, 60, TimeUnit.SECONDS); diff --git a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java index ce4f79f1dcfc8..0a252a0d62958 100644 --- a/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java +++ b/x-pack/plugin/ilm/src/internalClusterTest/java/org/elasticsearch/xpack/ilm/IndexLifecycleInitialisationTests.java @@ -588,14 +588,14 @@ public static class ObservableAction extends MockAction { } public static ObservableAction readObservableAction(StreamInput in) throws IOException { - List<Step> steps = in.readList(ObservableClusterStateWaitStep::new); + List<Step> steps = in.readCollectionAsList(ObservableClusterStateWaitStep::new); boolean 
safe = in.readBoolean(); return new ObservableAction(steps, safe); } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(getSteps().stream().map(s -> (ObservableClusterStateWaitStep) s).collect(Collectors.toList())); + out.writeCollection(getSteps().stream().map(s -> (ObservableClusterStateWaitStep) s).collect(Collectors.toList())); out.writeBoolean(isSafeAction()); } } diff --git a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/action/GetPipelineRequest.java b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/action/GetPipelineRequest.java index 72b7e2f6d037b..488faad11d214 100644 --- a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/action/GetPipelineRequest.java +++ b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/action/GetPipelineRequest.java @@ -26,7 +26,7 @@ public GetPipelineRequest(List<String> ids) { public GetPipelineRequest(StreamInput in) throws IOException { super(in); - ids = in.readStringList(); + ids = in.readStringCollectionAsList(); } public List<String> ids() { diff --git a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/action/GetPipelineResponse.java b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/action/GetPipelineResponse.java index f28edd25bf483..7713b3d05a541 100644 --- a/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/action/GetPipelineResponse.java +++ b/x-pack/plugin/logstash/src/main/java/org/elasticsearch/xpack/logstash/action/GetPipelineResponse.java @@ -38,7 +38,7 @@ public Map<String, BytesReference> pipelines() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(pipelines, StreamOutput::writeString, StreamOutput::writeBytesReference); + out.writeMap(pipelines, StreamOutput::writeBytesReference); } @Override diff --git a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java index 04554b577b481..1c5e8d05ccf38 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java +++ b/x-pack/plugin/mapper-unsigned-long/src/main/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldMapper.java @@ -460,8 +460,8 @@ protected static Long parseLowerRangeTerm(Object value, boolean include) { } String stringValue = (value instanceof BytesRef) ? 
((BytesRef) value).utf8ToString() : value.toString(); final BigDecimal bigDecimalValue = new BigDecimal(stringValue); // throws an exception if it is an improper number - if (bigDecimalValue.compareTo(BigDecimal.ZERO) <= 0) { - return 0L; // for values <=0, set lowerTerm to 0 + if (bigDecimalValue.compareTo(BigDecimal.ZERO) < 0) { + return 0L; // for values < 0, set lowerTerm to 0 } int c = bigDecimalValue.compareTo(BIGDECIMAL_2_64_MINUS_ONE); if (c > 0 || (c == 0 && include == false)) { diff --git a/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldTypeTests.java b/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldTypeTests.java index 9f14c0a022645..bf37412099f57 100644 --- a/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldTypeTests.java +++ b/x-pack/plugin/mapper-unsigned-long/src/test/java/org/elasticsearch/xpack/unsignedlong/UnsignedLongFieldTypeTests.java @@ -125,6 +125,7 @@ public void testParseLowerTermForRangeQuery() { assertEquals(0L, parseLowerRangeTerm(0L, true).longValue()); assertEquals(0L, parseLowerRangeTerm("0", true).longValue()); assertEquals(0L, parseLowerRangeTerm("0.0", true).longValue()); + assertEquals(1L, parseLowerRangeTerm("0", false).longValue()); assertEquals(1L, parseLowerRangeTerm("0.5", true).longValue()); assertEquals(9223372036854775807L, parseLowerRangeTerm(9223372036854775806L, false).longValue()); assertEquals(9223372036854775807L, parseLowerRangeTerm(9223372036854775807L, true).longValue()); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java index f125b274830ae..a3106eac4ab22 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelIT.java @@ -737,7 +737,7 @@ public void testStopUsedDeploymentByIngestProcessor() throws IOException { ) ); - stopDeployment(modelId, true); + stopDeployment(modelId, true, false); } public void testStopWithModelAliasUsedDeploymentByIngestProcessor() throws IOException { @@ -769,7 +769,7 @@ public void testStopWithModelAliasUsedDeploymentByIngestProcessor() throws IOExc + " by ingest processors; use force to stop the deployment" ) ); - stopDeployment(modelId, true); + stopDeployment(modelId, true, false); } public void testInferenceProcessorWithModelAlias() throws IOException { diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelRestTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelRestTestCase.java index cbd90c26df3b2..b278f9fe9e466 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelRestTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/PyTorchModelRestTestCase.java @@ -283,15 +283,21 @@ protected Response startDeployment( } protected void stopDeployment(String modelId) throws IOException { - stopDeployment(modelId, false); + stopDeployment(modelId, 
false, false); } - protected void stopDeployment(String modelId, boolean force) throws IOException { + protected void stopDeployment(String modelId, boolean force, boolean finishPendingWork) throws IOException { String endpoint = "/_ml/trained_models/" + modelId + "/deployment/_stop"; + + Request request = new Request("POST", endpoint); if (force) { - endpoint += "?force=true"; + request.addParameter("force", "true"); } - Request request = new Request("POST", endpoint); + + if (finishPendingWork) { + request.addParameter("finish_pending_work", "true"); + } + client().performRequest(request); } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/StopDeploymentGracefullyIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/StopDeploymentGracefullyIT.java new file mode 100644 index 0000000000000..97ffaacff4395 --- /dev/null +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/StopDeploymentGracefullyIT.java @@ -0,0 +1,49 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ml.integration; + +import org.elasticsearch.client.Response; +import org.elasticsearch.xpack.core.ml.inference.assignment.AllocationStatus; +import org.elasticsearch.xpack.core.ml.inference.assignment.Priority; + +import java.io.IOException; +import java.util.List; + +public class StopDeploymentGracefullyIT extends PyTorchModelRestTestCase { + + @SuppressWarnings("unchecked") + public void testStopDeploymentGracefully() throws IOException { + String baseModelId = "base-model"; + putAllModelParts(baseModelId); + + String forSearchDeploymentId = "for-search"; + startDeployment(baseModelId, forSearchDeploymentId, AllocationStatus.State.STARTED, 1, 1, Priority.LOW); + + Response inference = infer("my words", forSearchDeploymentId); + assertOK(inference); + + assertInferenceCountOnDeployment(1, forSearchDeploymentId); + + // infer by model Id + inference = infer("my words", baseModelId); + assertOK(inference); + assertInferenceCountOnModel(2, baseModelId); + + stopDeployment(forSearchDeploymentId, false, true); + } + + private void putAllModelParts(String modelId) throws IOException { + createPassThroughModel(modelId); + putModelDefinition(modelId); + putVocabulary(List.of("these", "are", "my", "words"), modelId); + } + + private void putModelDefinition(String modelId) throws IOException { + putModelDefinition(modelId, PyTorchModelIT.BASE_64_ENCODED_MODEL, PyTorchModelIT.RAW_MODEL_SIZE); + } +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/DefaultMachineLearningExtension.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/DefaultMachineLearningExtension.java index 8cb8863711745..fdc2c91cea3de 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/DefaultMachineLearningExtension.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/DefaultMachineLearningExtension.java @@ -7,7 +7,21 @@ package org.elasticsearch.xpack.ml; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.index.mapper.MapperService; + public class DefaultMachineLearningExtension implements MachineLearningExtension { + + public static final 
String[] ANALYTICS_DEST_INDEX_ALLOWED_SETTINGS = { + IndexMetadata.SETTING_NUMBER_OF_SHARDS, + IndexMetadata.SETTING_NUMBER_OF_REPLICAS, + MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), + MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING.getKey(), + MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), + MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING.getKey(), + MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), + MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING.getKey() }; + @Override public boolean useIlm() { return true; @@ -32,4 +46,9 @@ public boolean isDataFrameAnalyticsEnabled() { public boolean isNlpEnabled() { return true; } + + @Override + public String[] getAnalyticsDestIndexAllowedSettings() { + return ANALYTICS_DEST_INDEX_ALLOWED_SETTINGS; + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index 3f62dc10c6fa8..59f3f41371b53 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -1160,7 +1160,8 @@ public Collection createComponents( dataFrameAnalyticsAuditor, indexNameExpressionResolver, resultsPersisterService, - modelLoadingService + modelLoadingService, + machineLearningExtension.get().getAnalyticsDestIndexAllowedSettings() ); this.dataFrameAnalyticsManager.set(dataFrameAnalyticsManager); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java index b626cfcbca5cb..24b93301f37ac 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearningExtension.java @@ -22,4 +22,8 @@ default void configure(Settings settings) {} boolean isDataFrameAnalyticsEnabled(); boolean isNlpEnabled(); + + default String[] getAnalyticsDestIndexAllowedSettings() { + return DefaultMachineLearningExtension.ANALYTICS_DEST_INDEX_ALLOWED_SETTINGS; + } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java index b528e09b2b009..2a9b133cb11dd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java @@ -166,7 +166,7 @@ public void close() { private synchronized void scheduleNext() { try { - cancellable = threadPool.schedule(this::triggerTasks, schedulerProvider.get(), ThreadPool.Names.GENERIC); + cancellable = threadPool.schedule(this::triggerTasks, schedulerProvider.get(), threadPool.generic()); } catch (EsRejectedExecutionException e) { if (e.isExecutorShutdown()) { logger.debug("failed to schedule next maintenance task; shutting down", e); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java index 82e67f217b5fb..c6a360a018e2a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java @@ -13,8 +13,11 @@ import 
org.elasticsearch.common.component.LifecycleListener; import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; +import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; +import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; import org.elasticsearch.xpack.ml.datafeed.DatafeedRunner; import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsManager; +import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; import org.elasticsearch.xpack.ml.process.MlController; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; @@ -29,6 +32,8 @@ import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; +import static org.elasticsearch.core.Strings.format; + public class MlLifeCycleService { /** @@ -103,12 +108,42 @@ static boolean isNodeSafeToShutdown(String nodeId, ClusterState state, Instant s return true; } + logger.debug(() -> format("Checking shutdown safety for node id [%s]", nodeId)); + + boolean nodeHasRunningDeployments = nodeHasRunningDeployments(nodeId, state); + + logger.debug(() -> format("Node id [%s] has running deployments: %s", nodeId, nodeHasRunningDeployments)); + PersistentTasksCustomMetadata tasks = state.metadata().custom(PersistentTasksCustomMetadata.TYPE); - // TODO: currently only considering anomaly detection jobs - could extend in the future // Ignore failed jobs - the persistent task still exists to remember the failure (because no // persistent task means closed), but these don't need to be relocated to another node. return MlTasks.nonFailedJobTasksOnNode(tasks, nodeId).isEmpty() - && MlTasks.nonFailedSnapshotUpgradeTasksOnNode(tasks, nodeId).isEmpty(); + && MlTasks.nonFailedSnapshotUpgradeTasksOnNode(tasks, nodeId).isEmpty() + && nodeHasRunningDeployments == false; + } + + private static boolean nodeHasRunningDeployments(String nodeId, ClusterState state) { + TrainedModelAssignmentMetadata metadata = TrainedModelAssignmentMetadata.fromState(state); + + return metadata.allAssignments().values().stream().anyMatch(assignment -> { + if (assignment.isRoutedToNode(nodeId)) { + RoutingInfo routingInfo = assignment.getNodeRoutingTable().get(nodeId); + logger.debug( + () -> format( + "Assignment deployment id [%s] is routed to shutting down nodeId %s state: %s", + assignment.getDeploymentId(), + nodeId, + routingInfo.getState() + ) + ); + + // A routing could exist in the stopped state if the deployment has successfully drained any remaining requests + // If a route is starting, started, or stopping then the node is not ready to shut down yet + return routingInfo.getState().isNoneOf(RoutingState.STOPPED, RoutingState.FAILED); + } + + return false; + }); } /** diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetMlAutoscalingStats.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetMlAutoscalingStats.java index c2acd97e53c95..7ce5fd0a66eb2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetMlAutoscalingStats.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetMlAutoscalingStats.java @@ -33,6 +33,8 @@ import org.elasticsearch.xpack.ml.autoscaling.MlAutoscalingResourceTracker; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; +import java.util.concurrent.Executor; + /** * Internal (no-REST) transport to 
retrieve metrics for serverless autoscaling. */ @@ -41,6 +43,7 @@ public class TransportGetMlAutoscalingStats extends TransportMasterNodeAction MlAutoscalingResourceTracker.getMlAutoscalingStats( state, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java index 1275ff812f858..632b456a17a00 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java @@ -25,6 +25,7 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.ConcurrentCollections; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.persistent.PersistentTasksClusterService; @@ -337,7 +338,7 @@ private void normalStopDatafeed( threadPool.schedule( () -> doExecute(task, request, listener, attempt + 1), TimeValue.timeValueMillis(100L * attempt), - ThreadPool.Names.SAME + EsExecutors.DIRECT_EXECUTOR_SERVICE ); } else { listener.onFailure( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java index dd2a2db1bb789..c14c5d4aadf5b 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java @@ -262,6 +262,7 @@ protected void taskOperation( ) { task.stop( "undeploy_trained_model (api)", + request.shouldFinishPendingWork(), ActionListener.wrap(r -> listener.onResponse(new StopTrainedModelDeploymentAction.Response(true)), listener::onFailure) ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregationBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregationBuilder.java index 7efc0de20e091..8df56d9df9c2f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregationBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/CategorizeTextAggregationBuilder.java @@ -9,6 +9,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AbstractAggregationBuilder; @@ -50,7 +51,7 @@ public class CategorizeTextAggregationBuilder extends AbstractAggregationBuilder // some nodes are pre-8.3 and others are newer, so we throw an error in // this situation. The aggregation was experimental at the time this change // was made, so this is acceptable. 
- public static final TransportVersion ALGORITHM_CHANGED_VERSION = TransportVersion.V_8_3_0; + public static final TransportVersion ALGORITHM_CHANGED_VERSION = TransportVersions.V_8_3_0; static final ParseField FIELD_NAME = new ParseField("field"); static final ParseField SIMILARITY_THRESHOLD = new ParseField("similarity_threshold"); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregation.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregation.java index 9d63eeb8d3817..0ed673ac5a365 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregation.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/categorization/InternalCategorizationAggregation.java @@ -248,7 +248,7 @@ public InternalCategorizationAggregation(StreamInput in) throws IOException { ); } this.similarityThreshold = in.readVInt(); - this.buckets = in.readList(Bucket::new); + this.buckets = in.readCollectionAsList(Bucket::new); this.requiredSize = readSize(in); this.minDocCount = in.readVLong(); } @@ -266,7 +266,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { ); } out.writeVInt(similarityThreshold); - out.writeList(buckets); + out.writeCollection(buckets); writeSize(requiredSize, out); out.writeVLong(minDocCount); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregationBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregationBuilder.java index 3aa4c71ad41c9..1a82c5310332f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregationBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/changepoint/ChangePointAggregationBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.ml.aggs.changepoint; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers; @@ -59,7 +60,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_2_0; + return TransportVersions.V_8_2_0; } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/correlation/BucketCorrelationAggregationBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/correlation/BucketCorrelationAggregationBuilder.java index 3503cc20d1e89..9db6849b86873 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/correlation/BucketCorrelationAggregationBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/correlation/BucketCorrelationAggregationBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.ml.aggs.correlation; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.pipeline.BucketHelpers; @@ -137,6 +138,6 @@ public int hashCode() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_14_0; + return TransportVersions.V_7_14_0; } } diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/EclatMapReducer.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/EclatMapReducer.java index 9a8a47991af45..f6affc3d12340 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/EclatMapReducer.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/EclatMapReducer.java @@ -101,7 +101,7 @@ Map getProfilingInfo() { @Override public void writeTo(StreamOutput out) throws IOException { out.writeArray(frequentItemSets); - out.writeMap(profilingInfo, StreamOutput::writeString, StreamOutput::writeGenericValue); + out.writeMap(profilingInfo, StreamOutput::writeGenericValue); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetCollector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetCollector.java index 397ddae669aa3..18086748d6fe0 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetCollector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetCollector.java @@ -97,7 +97,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMapOfLists(fields, StreamOutput::writeString, StreamOutput::writeGenericValue); + out.writeMap(fields, (o, v) -> o.writeCollection(v, StreamOutput::writeGenericValue)); out.writeVLong(getDocCount()); out.writeDouble(support); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetsAggregationBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetsAggregationBuilder.java index 3cb864b2da09b..9081f4118f89f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetsAggregationBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/FrequentItemSetsAggregationBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.ml.aggs.frequentitemsets; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -160,16 +161,16 @@ public FrequentItemSetsAggregationBuilder( public FrequentItemSetsAggregationBuilder(StreamInput in) throws IOException { super(in); - this.fields = in.readList(MultiValuesSourceFieldConfig::new); + this.fields = in.readCollectionAsList(MultiValuesSourceFieldConfig::new); this.minimumSupport = in.readDouble(); this.minimumSetSize = in.readVInt(); this.size = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { this.filter = in.readOptionalNamedWriteable(QueryBuilder.class); } else { this.filter = null; } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { this.executionHint = in.readOptionalString(); } else { this.executionHint = null; @@ -198,14 +199,14 @@ public String getType() { @Override protected void doWriteTo(StreamOutput out) throws IOException { - 
out.writeList(fields); + out.writeCollection(fields); out.writeDouble(minimumSupport); out.writeVInt(minimumSetSize); out.writeVInt(size); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { out.writeOptionalNamedWriteable(filter); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_7_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_7_0)) { out.writeOptionalString(executionHint); } } @@ -265,7 +266,7 @@ public FrequentItemSetsAggregationBuilder subAggregation(AggregationBuilder aggr @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_4_0; + return TransportVersions.V_8_4_0; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ImmutableTransactionStore.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ImmutableTransactionStore.java index 878d646160320..f6d62b7b0f8dd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ImmutableTransactionStore.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/ImmutableTransactionStore.java @@ -8,7 +8,7 @@ package org.elasticsearch.xpack.ml.aggs.frequentitemsets; import org.apache.lucene.util.RamUsageEstimator; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.BigArrays; @@ -82,7 +82,7 @@ public ImmutableTransactionStore(StreamInput in, BigArrays bigArrays) throws IOE } this.totalTransactionCount = in.readVLong(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { this.filteredTransactionCount = in.readVLong(); } else { this.filteredTransactionCount = 0; @@ -158,7 +158,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(transactionCounts.get(i)); } out.writeVLong(totalTransactionCount); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_6_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_6_0)) { out.writeVLong(filteredTransactionCount); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/InternalItemSetMapReduceAggregation.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/InternalItemSetMapReduceAggregation.java index a4e8d88a299a2..1a1e8783e6efe 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/InternalItemSetMapReduceAggregation.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/InternalItemSetMapReduceAggregation.java @@ -87,7 +87,7 @@ public InternalItemSetMapReduceAggregation( this.mapReduceResult = this.mapReducer.readResult(in, bigArraysForMapReduce); } - this.fields = in.readList(Field::new); + this.fields = in.readCollectionAsList(Field::new); this.profiling = in.readBoolean(); } @@ -101,7 +101,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { mapReducer.writeTo(out); out.writeOptionalWriteable(mapFinalContext); out.writeOptionalWriteable(mapReduceResult); - out.writeList(fields); + out.writeCollection(fields); out.writeBoolean(profiling); } diff --git 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java index ec25edbf513ab..cac9d88256696 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/inference/InferencePipelineAggregationBuilder.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.Strings; @@ -252,7 +253,7 @@ protected void validate(ValidationContext context) { @Override protected void doWriteTo(StreamOutput out) throws IOException { out.writeString(modelId); - out.writeMap(bucketPathMap, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(bucketPathMap, StreamOutput::writeString); out.writeOptionalNamedWriteable(inferenceConfig); } @@ -380,6 +381,6 @@ public boolean equals(Object obj) { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_9_0; + return TransportVersions.V_7_9_0; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/kstest/BucketCountKSTestAggregationBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/kstest/BucketCountKSTestAggregationBuilder.java index 147d7831af5dc..dd204840e6fae 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/kstest/BucketCountKSTestAggregationBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/kstest/BucketCountKSTestAggregationBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.ml.aggs.kstest; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; @@ -185,6 +186,6 @@ public int hashCode() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_14_0; + return TransportVersions.V_7_14_0; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/kstest/InternalKSTestAggregation.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/kstest/InternalKSTestAggregation.java index b67acc4c1eba4..9c9ff5eb65693 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/kstest/InternalKSTestAggregation.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/aggs/kstest/InternalKSTestAggregation.java @@ -40,7 +40,7 @@ public String getWriteableName() { @Override protected void doWriteTo(StreamOutput out) throws IOException { - out.writeMap(modeValues, StreamOutput::writeString, StreamOutput::writeDouble); + out.writeMap(modeValues, StreamOutput::writeDouble); } Map<String, Double> getModeValues() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlScalingReason.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlScalingReason.java 
@@ -7,7 +7,7 @@ package org.elasticsearch.xpack.ml.autoscaling; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; @@ -46,11 +46,11 @@ public class MlScalingReason implements AutoscalingDeciderResult.Reason { private final String simpleReason; public MlScalingReason(StreamInput in) throws IOException { - this.waitingAnalyticsJobs = in.readStringList(); - this.waitingAnomalyJobs = in.readStringList(); - this.waitingSnapshotUpgrades = in.readStringList(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { - this.waitingModels = in.readStringList(); + this.waitingAnalyticsJobs = in.readStringCollectionAsList(); + this.waitingAnomalyJobs = in.readStringCollectionAsList(); + this.waitingSnapshotUpgrades = in.readStringCollectionAsList(); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { + this.waitingModels = in.readStringCollectionAsList(); } else { this.waitingModels = List.of(); } @@ -136,7 +136,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(this.waitingAnalyticsJobs); out.writeStringCollection(this.waitingAnomalyJobs); out.writeStringCollection(this.waitingSnapshotUpgrades); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_0_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_0_0)) { out.writeStringCollection(this.waitingModels); } this.passedConfiguration.writeTo(out); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedConfigAutoUpdater.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedConfigAutoUpdater.java index 380b47a23e5bd..3df50cf7f62ed 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedConfigAutoUpdater.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedConfigAutoUpdater.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.ClusterState; @@ -42,7 +43,7 @@ public DatafeedConfigAutoUpdater(DatafeedConfigProvider provider, IndexNameExpre @Override public boolean isMinTransportVersionSupported(TransportVersion minNodeVersion) { - return minNodeVersion.onOrAfter(TransportVersion.V_8_0_0); + return minNodeVersion.onOrAfter(TransportVersions.V_8_0_0); } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunner.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunner.java index 5341b44b013cc..799d884342226 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunner.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunner.java @@ -341,7 +341,7 @@ protected void doRun() { doDatafeedRealtime(nextDelayInMsSinceEpoch, jobId, holder); } } - }, delay, MachineLearning.DATAFEED_THREAD_POOL_NAME); + }, delay, threadPool.executor(MachineLearning.DATAFEED_THREAD_POOL_NAME)); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java index d70d34126fe27..e7aba2211b2df 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/extractor/scroll/ScrollDataExtractor.java @@ -18,6 +18,7 @@ import org.elasticsearch.action.search.SearchScrollAction; import org.elasticsearch.action.search.SearchScrollRequestBuilder; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.io.stream.BytesStreamOutput; import org.elasticsearch.core.TimeValue; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -30,8 +31,6 @@ import org.elasticsearch.xpack.ml.datafeed.DatafeedTimingStatsReporter; import org.elasticsearch.xpack.ml.extractor.ExtractedField; -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.InputStream; import java.util.NoSuchElementException; @@ -169,18 +168,6 @@ private SearchRequestBuilder buildSearchRequest(long start) { return searchRequestBuilder; } - /** - * Utility class to convert ByteArrayOutputStream to ByteArrayInputStream without copying the underlying buffer. - */ - private static class ConvertableByteArrayOutputStream extends ByteArrayOutputStream { - public ByteArrayInputStream resetThisAndGetByteArrayInputStream() { - ByteArrayInputStream inputStream = new ByteArrayInputStream(buf, 0, count); - buf = new byte[0]; - count = 0; - return inputStream; - } - } - /** * IMPORTANT: This is not an idempotent method. This method changes the input array by setting each element to null. */ @@ -192,7 +179,7 @@ private InputStream processAndConsumeSearchHits(SearchHit hits[]) throws IOExcep return null; } - ConvertableByteArrayOutputStream outputStream = new ConvertableByteArrayOutputStream(); + BytesStreamOutput outputStream = new BytesStreamOutput(); SearchHit lastHit = hits[hits.length - 1]; lastTimestamp = context.extractedFields.timeFieldValue(lastHit); @@ -217,7 +204,7 @@ private InputStream processAndConsumeSearchHits(SearchHit hits[]) throws IOExcep hits[i] = null; } } - return outputStream.resetThisAndGetByteArrayInputStream(); + return outputStream.bytes().streamInput(); } private InputStream continueScroll() throws IOException { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java index 718b70fb8801b..8ad7cd92a8e73 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java @@ -68,6 +68,7 @@ public class DataFrameAnalyticsManager { private final IndexNameExpressionResolver expressionResolver; private final ResultsPersisterService resultsPersisterService; private final ModelLoadingService modelLoadingService; + private final String[] destIndexAllowedSettings; /** Indicates whether the node is shutting down. 
*/ private final AtomicBoolean nodeShuttingDown = new AtomicBoolean(); @@ -81,7 +82,8 @@ public DataFrameAnalyticsManager( DataFrameAnalyticsAuditor auditor, IndexNameExpressionResolver expressionResolver, ResultsPersisterService resultsPersisterService, - ModelLoadingService modelLoadingService + ModelLoadingService modelLoadingService, + String[] destIndexAllowedSettings ) { this.settings = Objects.requireNonNull(settings); this.client = Objects.requireNonNull(client); @@ -93,6 +95,7 @@ public DataFrameAnalyticsManager( this.expressionResolver = Objects.requireNonNull(expressionResolver); this.resultsPersisterService = Objects.requireNonNull(resultsPersisterService); this.modelLoadingService = Objects.requireNonNull(modelLoadingService); + this.destIndexAllowedSettings = Objects.requireNonNull(destIndexAllowedSettings); } public void execute(DataFrameAnalyticsTask task, ClusterState clusterState, TimeValue masterNodeTimeout) { @@ -185,7 +188,11 @@ private void determineProgressAndResume(DataFrameAnalyticsTask task, DataFrameAn LOGGER.debug(() -> format("[%s] Starting job from state [%s]", config.getId(), startingState)); switch (startingState) { - case FIRST_TIME -> executeStep(task, config, new ReindexingStep(clusterService, client, task, auditor, config)); + case FIRST_TIME -> executeStep( + task, + config, + new ReindexingStep(clusterService, client, task, auditor, config, destIndexAllowedSettings) + ); case RESUMING_REINDEXING -> executeJobInMiddleOfReindexing(task, config); case RESUMING_ANALYZING -> executeStep(task, config, new AnalysisStep(client, task, auditor, config, processManager)); case RESUMING_INFERENCE -> buildInferenceStep( @@ -236,14 +243,21 @@ private void executeJobInMiddleOfReindexing(DataFrameAnalyticsTask task, DataFra ML_ORIGIN, DeleteIndexAction.INSTANCE, new DeleteIndexRequest(config.getDest().getIndex()), - ActionListener.wrap(r -> executeStep(task, config, new ReindexingStep(clusterService, client, task, auditor, config)), e -> { - Throwable cause = ExceptionsHelper.unwrapCause(e); - if (cause instanceof IndexNotFoundException) { - executeStep(task, config, new ReindexingStep(clusterService, client, task, auditor, config)); - } else { - task.setFailed(e); + ActionListener.wrap( + r -> executeStep(task, config, new ReindexingStep(clusterService, client, task, auditor, config, destIndexAllowedSettings)), + e -> { + Throwable cause = ExceptionsHelper.unwrapCause(e); + if (cause instanceof IndexNotFoundException) { + executeStep( + task, + config, + new ReindexingStep(clusterService, client, task, auditor, config, destIndexAllowedSettings) + ); + } else { + task.setFailed(e); + } } - }) + ) ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndex.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndex.java index b4db9b76f6e70..6073029fcf0b9 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndex.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndex.java @@ -25,14 +25,12 @@ import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; -import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Nullable; import org.elasticsearch.core.Tuple; import 
org.elasticsearch.index.IndexModule; import org.elasticsearch.index.analysis.AnalysisRegistry; -import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.index.mapper.NumberFieldMapper; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.action.StartDataFrameAnalyticsAction; @@ -109,27 +107,28 @@ public static void createDestinationIndex( Client client, Clock clock, DataFrameAnalyticsConfig analyticsConfig, + String[] destIndexAllowedSettings, ActionListener<CreateIndexResponse> listener ) { - ActionListener<CreateIndexRequest> createIndexRequestListener = ActionListener.wrap( - createIndexRequest -> ClientHelper.executeWithHeadersAsync( analyticsConfig.getHeaders(), ClientHelper.ML_ORIGIN, client, CreateIndexAction.INSTANCE, createIndexRequest, listener - ), - listener::onFailure - ); + ActionListener<CreateIndexRequest> createIndexRequestListener = ActionListener.wrap(createIndexRequest -> { + ClientHelper.executeWithHeadersAsync( analyticsConfig.getHeaders(), ClientHelper.ML_ORIGIN, client, CreateIndexAction.INSTANCE, createIndexRequest, listener + ); + }, listener::onFailure); - prepareCreateIndexRequest(client, clock, analyticsConfig, createIndexRequestListener); + prepareCreateIndexRequest(client, clock, analyticsConfig, destIndexAllowedSettings, createIndexRequestListener); } private static void prepareCreateIndexRequest( Client client, Clock clock, DataFrameAnalyticsConfig config, + String[] destIndexAllowedSettings, ActionListener<CreateIndexRequest> listener ) { AtomicReference<Settings> settingsHolder = new AtomicReference<>(); @@ -150,7 +149,7 @@ private static void prepareCreateIndexRequest( }, listener::onFailure); ActionListener<GetSettingsResponse> getSettingsResponseListener = ActionListener.wrap( - settingsResponse -> settingsListener.onResponse(settings(settingsResponse)), + settingsResponse -> settingsListener.onResponse(settings(settingsResponse, destIndexAllowedSettings)), listener::onFailure ); @@ -211,20 +210,10 @@ private static CreateIndexRequest createIndexRequest( return new CreateIndexRequest(destinationIndex, settings).mapping(mappingsAsMap); } - private static Settings settings(GetSettingsResponse settingsResponse) { - String[] settingsIndexKeys = { - IndexMetadata.SETTING_NUMBER_OF_SHARDS, - IndexMetadata.SETTING_NUMBER_OF_REPLICAS, - MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), - MapperService.INDEX_MAPPING_DEPTH_LIMIT_SETTING.getKey(), - MapperService.INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING.getKey(), - MapperService.INDEX_MAPPING_NESTED_DOCS_LIMIT_SETTING.getKey(), - MapperService.INDEX_MAPPING_FIELD_NAME_LENGTH_LIMIT_SETTING.getKey(), - MapperService.INDEX_MAPPING_DIMENSION_FIELDS_LIMIT_SETTING.getKey() }; - + private static Settings settings(GetSettingsResponse settingsResponse, String[] destIndexAllowedSettings) { Settings.Builder settingsBuilder = Settings.builder(); - for (String key : settingsIndexKeys) { + for (String key : destIndexAllowedSettings) { Long value = findMaxSettingValue(settingsResponse, key); if (value != null) { settingsBuilder.put(key, value); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java index 1d063faefd968..1ca78df1fad3d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/steps/ReindexingStep.java @@ -60,6 +60,7 @@ public class ReindexingStep extends AbstractDataFrameAnalyticsStep { private static final Logger LOGGER = LogManager.getLogger(ReindexingStep.class); private final ClusterService 
clusterService; + private final String[] destIndexAllowedSettings; @Nullable private volatile Long reindexingTaskId; private volatile boolean isReindexingFinished; @@ -69,10 +70,12 @@ public ReindexingStep( NodeClient client, DataFrameAnalyticsTask task, DataFrameAnalyticsAuditor auditor, - DataFrameAnalyticsConfig config + DataFrameAnalyticsConfig config, + String[] destIndexAllowedSettings ) { super(client, task, auditor, config); this.clusterService = Objects.requireNonNull(clusterService); + this.destIndexAllowedSettings = Objects.requireNonNull(destIndexAllowedSettings); } @Override @@ -208,7 +211,13 @@ protected void doExecute(ActionListener listener) { Messages.getMessage(Messages.DATA_FRAME_ANALYTICS_AUDIT_CREATING_DEST_INDEX, config.getDest().getIndex()) ); LOGGER.info("[{}] Creating destination index [{}]", config.getId(), config.getDest().getIndex()); - DestinationIndex.createDestinationIndex(parentTaskClient, Clock.systemUTC(), config, copyIndexCreatedListener); + DestinationIndex.createDestinationIndex( + parentTaskClient, + Clock.systemUTC(), + config, + destIndexAllowedSettings, + copyIndexCreatedListener + ); } else { copyIndexCreatedListener.onFailure(e); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ModelAliasMetadata.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ModelAliasMetadata.java index 9e4018cb2747a..0847479489ec2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ModelAliasMetadata.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/ModelAliasMetadata.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.ml.inference; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.DiffableUtils; @@ -112,12 +113,12 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_13_0; + return TransportVersions.V_7_13_0; } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(this.modelAliases, StreamOutput::writeString, (stream, val) -> val.writeTo(stream)); + out.writeMap(this.modelAliases, StreamOutput::writeWriteable); } public String getModelId(String modelAlias) { @@ -162,7 +163,7 @@ public void writeTo(StreamOutput out) throws IOException { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_13_0; + return TransportVersions.V_7_13_0; } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/TrainedModelStatsService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/TrainedModelStatsService.java index b6c6f0865aad8..9c3a157e0de13 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/TrainedModelStatsService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/TrainedModelStatsService.java @@ -161,7 +161,7 @@ void start() { scheduledFuture = threadPool.scheduleWithFixedDelay( this::updateStats, PERSISTENCE_INTERVAL, - MachineLearning.UTILITY_THREAD_POOL_NAME + threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME) ); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java index 80c2b5f7cf2ca..efc8bd84c6350 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java @@ -12,6 +12,7 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.cluster.ClusterChangedEvent; @@ -62,13 +63,15 @@ import java.util.stream.Collectors; import static org.elasticsearch.core.Strings.format; +import static org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentUtils.NODES_CHANGED_REASON; +import static org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentUtils.createShuttingDownRoute; public class TrainedModelAssignmentClusterService implements ClusterStateListener { private static final Logger logger = LogManager.getLogger(TrainedModelAssignmentClusterService.class); - private static final TransportVersion RENAME_ALLOCATION_TO_ASSIGNMENT_TRANSPORT_VERSION = TransportVersion.V_8_3_0; - public static final TransportVersion DISTRIBUTED_MODEL_ALLOCATION_TRANSPORT_VERSION = TransportVersion.V_8_4_0; + private static final TransportVersion RENAME_ALLOCATION_TO_ASSIGNMENT_TRANSPORT_VERSION = TransportVersions.V_8_3_0; + public static final TransportVersion DISTRIBUTED_MODEL_ALLOCATION_TRANSPORT_VERSION = TransportVersions.V_8_4_0; private final ClusterService clusterService; private final ThreadPool threadPool; @@ -232,6 +235,8 @@ static ClusterState removeRoutingToUnassignableNodes(ClusterState currentState) Set<String> assignableNodes = getAssignableNodes(currentState).stream().map(DiscoveryNode::getId).collect(Collectors.toSet()); TrainedModelAssignmentMetadata metadata = TrainedModelAssignmentMetadata.fromState(currentState); TrainedModelAssignmentMetadata.Builder builder = TrainedModelAssignmentMetadata.builder(currentState); + Set<String> shuttingDownNodes = currentState.metadata().nodeShutdowns().getAllNodeIds(); + for (TrainedModelAssignment assignment : metadata.allAssignments().values()) { Set<String> routedNodeIdsToRemove = Sets.difference(assignment.getNodeRoutingTable().keySet(), assignableNodes); if (routedNodeIdsToRemove.isEmpty() == false) { @@ -242,14 +247,69 @@ static ClusterState removeRoutingToUnassignableNodes(ClusterState currentState) routedNodeIdsToRemove ) ); - TrainedModelAssignment.Builder assignmentBuilder = TrainedModelAssignment.Builder.fromAssignment(assignment); - routedNodeIdsToRemove.forEach(assignmentBuilder::removeRoutingEntry); + + /* + * This code is to handle the following edge case. + * + * This code was added in 8.11.0. This code path is hit if the version of a node in the cluster is less than 8.4.0. 
+             * - A rolling upgrade is performed from a version less than 8.4.0
+             * - The versions of the nodes in the cluster are mixed between 8.11.0 and versions less than 8.4.0
+             * - The master node upgrades to 8.11.0 (which will have this code change)
+             * - An ML node upgrades to 8.11.0 and begins shutting down
+             * - A data node exists on a version less than 8.4.0
+             *
+             * The ML node that is shutting down will go through the graceful shutdown by having any routes referencing it set to
+             * stopping. The TrainedModelAssignmentNodeService will be notified of the change, see that the route is in stopping,
+             * and complete any remaining work in the processes before stopping them.
+             *
+             * If in the future we can simplify and remove this edge-case code, that'd be ideal.
+             */
+            TrainedModelAssignment.Builder assignmentBuilder = removeRoutingBuilder(
+                routedNodeIdsToRemove,
+                shuttingDownNodes,
+                assignment
+            );
+
                builder.updateAssignment(assignment.getDeploymentId(), assignmentBuilder.calculateAndSetAssignmentState());
            }
        }
        return update(currentState, builder);
    }

+    private static TrainedModelAssignment.Builder removeRoutingBuilder(
+        Set<String> nodeIds,
+        Set<String> shuttingDownNodes,
+        TrainedModelAssignment assignment
+    ) {
+        TrainedModelAssignment.Builder assignmentBuilder = TrainedModelAssignment.Builder.fromAssignment(assignment);
+
+        for (String nodeIdToRemove : nodeIds) {
+            RoutingInfo routingInfoToRemove = assignment.getNodeRoutingTable().get(nodeIdToRemove);
+
+            if (shuttingDownNodes.contains(nodeIdToRemove) == false) {
+                logger.debug(
+                    () -> format("[%s] Removing route for unassignable node id [%s]", assignment.getDeploymentId(), nodeIdToRemove)
+                );
+
+                assignmentBuilder.removeRoutingEntry(nodeIdToRemove);
+            } else if (routingInfoToRemove != null && routingInfoToRemove.getState().isAnyOf(RoutingState.STARTED, RoutingState.STARTING)) {
+                logger.debug(
+                    () -> format(
+                        "[%s] Found assignment with route to shutting down node id [%s], adding stopping route",
+                        assignment.getDeploymentId(),
+                        nodeIdToRemove
+                    )
+                );
+
+                RoutingInfo stoppingRouteInfo = createShuttingDownRoute(assignment.getNodeRoutingTable().get(nodeIdToRemove));
+                assignmentBuilder.addOrOverwriteRoutingEntry(nodeIdToRemove, stoppingRouteInfo);
+            }
+        }
+
+        return assignmentBuilder;
+    }
+
     public void updateModelRoutingTable(
         UpdateTrainedModelAssignmentRoutingInfoAction.Request request,
         ActionListener listener
@@ -490,20 +550,87 @@ private TrainedModelAssignmentMetadata.Builder rebalanceAssignments(
         List<DiscoveryNode> nodes = getAssignableNodes(currentState);
         logger.debug(() -> format("assignable nodes are %s", nodes.stream().map(DiscoveryNode::getId).toList()));
         Map nodeLoads = detectNodeLoads(nodes, currentState);
+        TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.fromState(currentState);
+
         TrainedModelAssignmentRebalancer rebalancer = new TrainedModelAssignmentRebalancer(
-            TrainedModelAssignmentMetadata.fromState(currentState),
+            currentMetadata,
             nodeLoads,
             nodeAvailabilityZoneMapper.buildMlNodesByAvailabilityZone(currentState),
             modelToAdd,
             allocatedProcessorsScale
         );
-        TrainedModelAssignmentMetadata.Builder rebalanced = rebalancer.rebalance();
+
+        Set<String> shuttingDownNodeIds = currentState.metadata().nodeShutdowns().getAllNodeIds();
+        TrainedModelAssignmentMetadata.Builder rebalanced = setShuttingDownNodeRoutesToStopping(
+            currentMetadata,
+            shuttingDownNodeIds,
+            rebalancer.rebalance()
+        );
+
        if (modelToAdd.isPresent()) {
            checkModelIsFullyAllocatedIfScalingIsNotPossible(modelToAdd.get().getDeploymentId(), rebalanced, nodes);
        }
+
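
/*
 * A standalone sketch of the routing decision introduced above, using simplified
 * stand-in types (a RouteState enum and plain maps) rather than the real RoutingInfo
 * and TrainedModelAssignment classes: routes to nodes that have simply vanished are
 * dropped, while routes to shutting-down nodes that were serving traffic are flipped
 * to STOPPING so the native process can drain its queue first.
 */
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

class RouteCleanupSketch {
    enum RouteState { STARTING, STARTED, STOPPING, STOPPED }

    static Map<String, RouteState> cleanUp(
        Map<String, RouteState> routingTable,
        Set<String> unassignableNodes,
        Set<String> shuttingDownNodes
    ) {
        Map<String, RouteState> result = new HashMap<>(routingTable);
        for (String nodeId : unassignableNodes) {
            RouteState state = result.get(nodeId);
            if (state == null) {
                continue; // no route to this node for this deployment
            }
            if (shuttingDownNodes.contains(nodeId) == false) {
                // The node is gone rather than shutting down: drop the route outright.
                result.remove(nodeId);
            } else if (state == RouteState.STARTED || state == RouteState.STARTING) {
                // Graceful shutdown: keep the route but mark it STOPPING so pending work drains.
                result.put(nodeId, RouteState.STOPPING);
            }
        }
        return result;
    }

    public static void main(String[] args) {
        Map<String, RouteState> routes = Map.of("node-1", RouteState.STARTED, "node-2", RouteState.STARTED);
        System.out.println(cleanUp(new HashMap<>(routes), Set.of("node-1", "node-2"), Set.of("node-2")));
        // prints {node-2=STOPPING}: node-1's route was removed, node-2 drains before stopping
    }
}
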
        return rebalanced;
    }

+    // Default visibility for testing
+    static TrainedModelAssignmentMetadata.Builder setShuttingDownNodeRoutesToStopping(
+        TrainedModelAssignmentMetadata currentMetadata,
+        Set<String> shuttingDownNodeIds,
+        TrainedModelAssignmentMetadata.Builder builder
+    ) {
+        if (shuttingDownNodeIds.isEmpty()) {
+            return builder;
+        }
+
+        for (TrainedModelAssignment existingAssignment : currentMetadata.allAssignments().values()) {
+            boolean foundShuttingDownNodeForAssignment = false;
+
+            String existingDeploymentId = existingAssignment.getDeploymentId();
+            TrainedModelAssignment.Builder assignmentBuilder = builder.hasModelDeployment(existingAssignment.getDeploymentId())
+                ? builder.getAssignment(existingDeploymentId)
+                : TrainedModelAssignment.Builder.fromAssignment(existingAssignment)
+                    /*
+                     * If we hit this code path, it means the assignment originally existed prior to the rebalance and then
+                     * disappeared. This would be an anomaly, so we'll set the assignment to stopping and attempt to gracefully
+                     * shut down the native process.
+                     */
+                    .stopAssignment(NODES_CHANGED_REASON)
+                    // If there are other routes that are now outdated after the rebalance we don't want to include them, so let's
+                    // start with a fresh table
+                    .clearNodeRoutingTable();
+
+            for (String nodeId : shuttingDownNodeIds) {
+                if (existingAssignment.isRoutedToNode(nodeId)
+                    && existingAssignment.getNodeRoutingTable()
+                        .get(nodeId)
+                        .getState()
+                        .isAnyOf(RoutingState.STARTED, RoutingState.STARTING)) {
+                    logger.debug(
+                        () -> format(
+                            "Found assignment deployment id: [%s] with route to shutting down node id: [%s], adding stopping route",
+                            existingDeploymentId,
+                            nodeId
+                        )
+                    );
+
+                    foundShuttingDownNodeForAssignment = true;
+                    RoutingInfo stoppingRouteInfo = createShuttingDownRoute(existingAssignment.getNodeRoutingTable().get(nodeId));
+
+                    assignmentBuilder.addOrOverwriteRoutingEntry(nodeId, stoppingRouteInfo);
+                }
+            }
+
+            // If we didn't find a route to a shutting-down node then we don't want to add an empty assignment here
+            if (foundShuttingDownNodeForAssignment) {
+                builder.addOrOverwriteAssignment(existingDeploymentId, assignmentBuilder);
+            }
+        }
+
+        return builder;
+    }
+
     private void checkModelIsFullyAllocatedIfScalingIsNotPossible(
         String modelId,
         TrainedModelAssignmentMetadata.Builder assignments,
@@ -807,7 +934,7 @@ static Optional detectReasonToRebalanceModels(final ClusterChangedEvent
         return detectReasonIfMlJobsStopped(event).or(() -> {
             String reason = null;
             if (haveMlNodesChanged(event, newMetadata)) {
-                reason = "nodes changed";
+                reason = NODES_CHANGED_REASON;
             } else if (newMetadata.hasOutdatedAssignments()) {
                 reason = "outdated assignments detected";
             }
@@ -906,7 +1033,10 @@ static boolean haveMlNodesChanged(ClusterChangedEvent event, TrainedModelAssignm
                 continue;
             }
             for (var nodeId : exitingShutDownNodes) {
-                if (trainedModelAssignment.isRoutedToNode(nodeId)) {
+                if (trainedModelAssignment.isRoutedToNode(nodeId)
+                    // If the route is stopping then it's draining its queue or being forced to stop, so let that continue
+                    // and don't try to rebalance until it has completely finished
+                    && trainedModelAssignment.getNodeRoutingTable().get(nodeId).getState() != RoutingState.STOPPING) {
                     logger.debug(
                         () -> format(
                             "should rebalance because model deployment [%s] has allocations on shutting down node [%s]",
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadata.java
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadata.java index 18cca8b65544b..8391d287a6847 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadata.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentMetadata.java @@ -10,6 +10,7 @@ import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.DiffableUtils; @@ -155,12 +156,12 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_0_0; + return TransportVersions.V_8_0_0; } @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(deploymentRoutingEntries, StreamOutput::writeString, (o, w) -> w.writeTo(o)); + out.writeMap(deploymentRoutingEntries, StreamOutput::writeWriteable); } @Override @@ -235,6 +236,14 @@ public Builder updateAssignment(String deploymentId, TrainedModelAssignment.Buil return this; } + /** + * Adds the assignment regardless of whether it already exists. + */ + public Builder addOrOverwriteAssignment(String deploymentId, TrainedModelAssignment.Builder assignment) { + deploymentRoutingEntries.put(deploymentId, assignment); + return this; + } + public TrainedModelAssignment.Builder getAssignment(String deploymentId) { return deploymentRoutingEntries.get(deploymentId); } @@ -311,7 +320,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_0_0; + return TransportVersions.V_8_0_0; } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java index 8554e4120775a..f1b9298b05ca4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeService.java @@ -33,6 +33,7 @@ import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.action.UpdateTrainedModelAssignmentRoutingInfoAction; +import org.elasticsearch.xpack.core.ml.inference.assignment.AssignmentState; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfoUpdate; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; @@ -51,17 +52,21 @@ import java.util.ArrayDeque; import java.util.ArrayList; +import java.util.Collections; import java.util.Deque; import java.util.List; import java.util.Map; import java.util.Optional; +import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentLinkedDeque; +import java.util.function.Consumer; import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.xpack.core.ml.MlTasks.TRAINED_MODEL_ASSIGNMENT_TASK_ACTION; import static 
org.elasticsearch.xpack.core.ml.MlTasks.TRAINED_MODEL_ASSIGNMENT_TASK_TYPE; import static org.elasticsearch.xpack.ml.MachineLearning.ML_PYTORCH_MODEL_INFERENCE_FEATURE; +import static org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentUtils.NODE_IS_SHUTTING_DOWN; public class TrainedModelAssignmentNodeService implements ClusterStateListener { @@ -146,30 +151,12 @@ public void beforeStop() { this.expressionResolver = expressionResolver; } - void stopDeploymentAsync(TrainedModelDeploymentTask task, String reason, ActionListener listener) { - if (stopped) { - return; - } - task.markAsStopped(reason); - - threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(() -> { - try { - deploymentManager.stopDeployment(task); - taskManager.unregister(task); - deploymentIdToTask.remove(task.getDeploymentId()); - listener.onResponse(null); - } catch (Exception e) { - listener.onFailure(e); - } - }); - } - public void start() { stopped = false; scheduledFuture = threadPool.scheduleWithFixedDelay( this::loadQueuedModels, MODEL_LOADING_CHECK_INTERVAL, - MachineLearning.UTILITY_THREAD_POOL_NAME + threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME) ); } @@ -241,22 +228,38 @@ void loadQueuedModels() { loadingModels.addAll(loadingToRetry); } + public void gracefullyStopDeploymentAndNotify( + TrainedModelDeploymentTask task, + String reason, + ActionListener listener + ) { + logger.debug(() -> format("[%s] Gracefully stopping deployment due to reason %s", task.getDeploymentId(), reason)); + + stopAndNotifyHelper(task, reason, listener, deploymentManager::stopAfterCompletingPendingWork); + } + public void stopDeploymentAndNotify(TrainedModelDeploymentTask task, String reason, ActionListener listener) { - final RoutingInfoUpdate updateToStopped = RoutingInfoUpdate.updateStateAndReason( - new RoutingStateAndReason(RoutingState.STOPPED, reason) - ); + logger.debug(() -> format("[%s] Forcefully stopping deployment due to reason %s", task.getDeploymentId(), reason)); + + stopAndNotifyHelper(task, reason, listener, deploymentManager::stopDeployment); + } + + private void stopAndNotifyHelper( + TrainedModelDeploymentTask task, + String reason, + ActionListener listener, + Consumer stopDeploymentFunc + ) { + // Removing the entry from the map to avoid the possibility of a node shutdown triggering a concurrent graceful stopping of the + // process while we are attempting to forcefully stop the native process + // The graceful stopping will only occur if there is an entry in the map + deploymentIdToTask.remove(task.getDeploymentId()); + ActionListener notifyDeploymentOfStopped = updateRoutingStateToStoppedListener(task.getDeploymentId(), reason, listener); - ActionListener notifyDeploymentOfStopped = ActionListener.wrap( - _void -> updateStoredState(task.getDeploymentId(), updateToStopped, listener), - failed -> { // if we failed to stop the process, something strange is going on, but we should still notify of stop - logger.warn(() -> "[" + task.getDeploymentId() + "] failed to stop due to error", failed); - updateStoredState(task.getDeploymentId(), updateToStopped, listener); - } - ); updateStoredState( task.getDeploymentId(), RoutingInfoUpdate.updateStateAndReason(new RoutingStateAndReason(RoutingState.STOPPING, reason)), - ActionListener.wrap(success -> stopDeploymentAsync(task, reason, notifyDeploymentOfStopped), e -> { + ActionListener.wrap(success -> stopDeploymentHelper(task, reason, stopDeploymentFunc, notifyDeploymentOfStopped), e -> { if (ExceptionsHelper.unwrapCause(e) 
instanceof ResourceNotFoundException) { logger.debug( () -> format("[%s] failed to set routing state to stopping as assignment already removed", task.getDeploymentId()), @@ -267,7 +270,7 @@ public void stopDeploymentAndNotify(TrainedModelDeploymentTask task, String reas // TODO this means requests may still be routed here, should we not stop deployment? logger.warn(() -> "[" + task.getDeploymentId() + "] failed to set routing state to stopping due to error", e); } - stopDeploymentAsync(task, reason, notifyDeploymentOfStopped); + stopDeploymentHelper(task, reason, stopDeploymentFunc, notifyDeploymentOfStopped); }) ); } @@ -330,84 +333,216 @@ public Task createTask(long id, String type, String action, TaskId parentTaskId, @Override public void clusterChanged(ClusterChangedEvent event) { latestState = event.state(); - if (event.metadataChanged()) { - final boolean isResetMode = MlMetadata.getMlMetadata(event.state()).isResetMode(); - TrainedModelAssignmentMetadata modelAssignmentMetadata = TrainedModelAssignmentMetadata.fromState(event.state()); - final String currentNode = event.state().nodes().getLocalNodeId(); - final boolean isNewAllocationSupported = event.state() - .getMinTransportVersion() - .onOrAfter(TrainedModelAssignmentClusterService.DISTRIBUTED_MODEL_ALLOCATION_TRANSPORT_VERSION); - - if (isResetMode == false && isNewAllocationSupported) { - updateNumberOfAllocations(modelAssignmentMetadata); - } + if (event.metadataChanged() == false) { + return; + } - for (TrainedModelAssignment trainedModelAssignment : modelAssignmentMetadata.allAssignments().values()) { - RoutingInfo routingInfo = trainedModelAssignment.getNodeRoutingTable().get(currentNode); - // Add new models to start loading - if (routingInfo != null && isNewAllocationSupported) { - if (routingInfo.getState() == RoutingState.STARTING - && deploymentIdToTask.containsKey(trainedModelAssignment.getDeploymentId()) - && deploymentIdToTask.get(trainedModelAssignment.getDeploymentId()).isFailed()) { - // This is a failed assignment and we are restarting it. For this we need to remove the task first. 
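
/*
 * Sketch of the "stop strategy as a Consumer" shape used by stopAndNotifyHelper above,
 * with hypothetical stand-in types: the graceful and forceful paths share one helper and
 * differ only in the function that actually stops the native process. Removing the task
 * from the map first is what prevents a concurrent graceful stop racing a forced one.
 */
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Consumer;

class StopStrategySketch {
    private final Map<String, String> tasksById = new ConcurrentHashMap<>();
    private final ExecutorService utilityPool = Executors.newSingleThreadExecutor();

    void stopForcefully(String deploymentId) {
        stopHelper(deploymentId, task -> System.out.println("killing process for " + task));
    }

    void stopGracefully(String deploymentId) {
        stopHelper(deploymentId, task -> System.out.println("draining queue, then stopping " + task));
    }

    private void stopHelper(String deploymentId, Consumer<String> stopFunc) {
        // Claim the task first: whichever caller removes it wins, so only one stop runs.
        String task = tasksById.remove(deploymentId);
        if (task != null) {
            utilityPool.execute(() -> stopFunc.accept(task));
        }
    }
}
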
- taskManager.unregister(deploymentIdToTask.get(trainedModelAssignment.getDeploymentId())); - deploymentIdToTask.remove(trainedModelAssignment.getDeploymentId()); + final boolean isResetMode = MlMetadata.getMlMetadata(event.state()).isResetMode(); + TrainedModelAssignmentMetadata modelAssignmentMetadata = TrainedModelAssignmentMetadata.fromState(event.state()); + final String currentNode = event.state().nodes().getLocalNodeId(); + final boolean isNewAllocationSupported = event.state() + .getMinTransportVersion() + .onOrAfter(TrainedModelAssignmentClusterService.DISTRIBUTED_MODEL_ALLOCATION_TRANSPORT_VERSION); + final Set shuttingDownNodes = Collections.unmodifiableSet(event.state().metadata().nodeShutdowns().getAllNodeIds()); + + if (isResetMode == false && isNewAllocationSupported) { + updateNumberOfAllocations(modelAssignmentMetadata); + } + + for (TrainedModelAssignment trainedModelAssignment : modelAssignmentMetadata.allAssignments().values()) { + RoutingInfo routingInfo = trainedModelAssignment.getNodeRoutingTable().get(currentNode); + if (routingInfo != null) { + // Add new models to start loading if the assignment is not stopping + if (isNewAllocationSupported && trainedModelAssignment.getAssignmentState() != AssignmentState.STOPPING) { + if (shouldAssignmentBeRestarted(routingInfo, trainedModelAssignment.getDeploymentId())) { + prepareAssignmentForRestart(trainedModelAssignment); } - if (routingInfo.getState().isAnyOf(RoutingState.STARTING, RoutingState.STARTED) // periodic retries of `failed` should - // be handled in a separate process - // This means we don't already have a task and should attempt creating one and starting the model loading - // If we don't have a task but are STARTED, this means the cluster state had a started assignment, - // the node crashed and then started again - && deploymentIdToTask.containsKey(trainedModelAssignment.getDeploymentId()) == false - // If we are in reset mode, don't start loading a new model on this node. 
- && isResetMode == false) { + + if (shouldLoadModel(routingInfo, trainedModelAssignment.getDeploymentId(), isResetMode)) { prepareModelToLoad( - new StartTrainedModelDeploymentAction.TaskParams( - trainedModelAssignment.getTaskParams().getModelId(), - trainedModelAssignment.getDeploymentId(), - trainedModelAssignment.getTaskParams().getModelBytes(), - routingInfo.getCurrentAllocations(), - trainedModelAssignment.getTaskParams().getThreadsPerAllocation(), - trainedModelAssignment.getTaskParams().getQueueCapacity(), - trainedModelAssignment.getTaskParams().getCacheSize().orElse(null), - trainedModelAssignment.getTaskParams().getPriority(), - trainedModelAssignment.getTaskParams().getPerDeploymentMemoryBytes(), - trainedModelAssignment.getTaskParams().getPerAllocationMemoryBytes() - ) + createStartTrainedModelDeploymentTaskParams(trainedModelAssignment, routingInfo.getCurrentAllocations()) ); } } - // This model is not routed to the current node at all - if (routingInfo == null) { - TrainedModelDeploymentTask task = deploymentIdToTask.remove(trainedModelAssignment.getDeploymentId()); - if (task != null) { - stopDeploymentAsync( - task, - NODE_NO_LONGER_REFERENCED, - ActionListener.wrap( - r -> logger.trace(() -> "[" + task.getDeploymentId() + "] stopped deployment"), - e -> logger.warn(() -> "[" + task.getDeploymentId() + "] failed to fully stop deployment", e) - ) - ); - } + + if (isAssignmentOnShuttingDownNode(routingInfo, trainedModelAssignment.getDeploymentId(), shuttingDownNodes, currentNode)) { + gracefullyStopDeployment(trainedModelAssignment.getDeploymentId(), currentNode); } - } - List toCancel = new ArrayList<>(); - for (String deploymentIds : Sets.difference(deploymentIdToTask.keySet(), modelAssignmentMetadata.allAssignments().keySet())) { - toCancel.add(deploymentIdToTask.remove(deploymentIds)); - } - // should all be stopped in the same executor thread? - for (TrainedModelDeploymentTask t : toCancel) { - stopDeploymentAsync( - t, - ASSIGNMENT_NO_LONGER_EXISTS, - ActionListener.wrap( - r -> logger.trace(() -> "[" + t.getDeploymentId() + "] stopped deployment"), - e -> logger.warn(() -> "[" + t.getDeploymentId() + "] failed to fully stop deployment", e) - ) - ); + } else { + stopUnreferencedDeployment(trainedModelAssignment.getDeploymentId(), currentNode); } } + + List toCancel = new ArrayList<>(); + for (String deploymentIds : Sets.difference(deploymentIdToTask.keySet(), modelAssignmentMetadata.allAssignments().keySet())) { + toCancel.add(deploymentIdToTask.remove(deploymentIds)); + } + // should all be stopped in the same executor thread? + for (TrainedModelDeploymentTask t : toCancel) { + stopDeploymentAsync( + t, + ASSIGNMENT_NO_LONGER_EXISTS, + ActionListener.wrap( + r -> logger.trace(() -> "[" + t.getDeploymentId() + "] stopped deployment"), + e -> logger.warn(() -> "[" + t.getDeploymentId() + "] failed to fully stop deployment", e) + ) + ); + } + } + + private boolean shouldAssignmentBeRestarted(RoutingInfo routingInfo, String deploymentId) { + return routingInfo.getState() == RoutingState.STARTING + && deploymentIdToTask.containsKey(deploymentId) + && deploymentIdToTask.get(deploymentId).isFailed(); + } + + private void prepareAssignmentForRestart(TrainedModelAssignment trainedModelAssignment) { + // This is a failed assignment and we are restarting it. For this we need to remove the task first. 
+        taskManager.unregister(deploymentIdToTask.get(trainedModelAssignment.getDeploymentId()));
+        deploymentIdToTask.remove(trainedModelAssignment.getDeploymentId());
+    }
+
+    private boolean shouldLoadModel(RoutingInfo routingInfo, String deploymentId, boolean isResetMode) {
+        // Periodic retries of `failed` routes should be handled in a separate process.
+        return routingInfo.getState().isAnyOf(RoutingState.STARTING, RoutingState.STARTED)
+            // This means we don't already have a task and should attempt creating one and starting the model loading.
+            // If we don't have a task but are STARTED, this means the cluster state had a started assignment,
+            // the node crashed and then started again.
+            && deploymentIdToTask.containsKey(deploymentId) == false
+            // If we are in reset mode, don't start loading a new model on this node.
+            && isResetMode == false;
+    }
+
+    private static StartTrainedModelDeploymentAction.TaskParams createStartTrainedModelDeploymentTaskParams(
+        TrainedModelAssignment trainedModelAssignment,
+        int currentAllocations
+    ) {
+        return new StartTrainedModelDeploymentAction.TaskParams(
+            trainedModelAssignment.getTaskParams().getModelId(),
+            trainedModelAssignment.getDeploymentId(),
+            trainedModelAssignment.getTaskParams().getModelBytes(),
+            currentAllocations,
+            trainedModelAssignment.getTaskParams().getThreadsPerAllocation(),
+            trainedModelAssignment.getTaskParams().getQueueCapacity(),
+            trainedModelAssignment.getTaskParams().getCacheSize().orElse(null),
+            trainedModelAssignment.getTaskParams().getPriority(),
+            trainedModelAssignment.getTaskParams().getPerDeploymentMemoryBytes(),
+            trainedModelAssignment.getTaskParams().getPerAllocationMemoryBytes()
+        );
+    }
+
+    private boolean isAssignmentOnShuttingDownNode(
+        RoutingInfo routingInfo,
+        String deploymentId,
+        Set<String> shuttingDownNodes,
+        String currentNode
+    ) {
+        return deploymentIdToTask.containsKey(deploymentId)
+            && routingInfo.getState() == RoutingState.STOPPING
+            && shuttingDownNodes.contains(currentNode);
+    }
+
+    private void gracefullyStopDeployment(String deploymentId, String currentNode) {
+        logger.debug(() -> format("[%s] Gracefully stopping deployment for shutting down node %s", deploymentId, currentNode));
+
+        TrainedModelDeploymentTask task = deploymentIdToTask.remove(deploymentId);
+        if (task == null) {
+            logger.debug(
+                () -> format(
+                    "[%s] Unable to gracefully stop deployment for shutting down node %s because the task does not exist",
+                    deploymentId,
+                    currentNode
+                )
+            );
+            return;
+        }
+
+        ActionListener routingStateListener = ActionListener.wrap(
+            r -> logger.debug(
+                () -> format("[%s] Gracefully stopped deployment for shutting down node %s", task.getDeploymentId(), currentNode)
+            ),
+            e -> logger.error(
+                () -> format("[%s] Failed to gracefully stop deployment for shutting down node %s", task.getDeploymentId(), currentNode),
+                e
+            )
+        );
+
+        ActionListener notifyDeploymentOfStopped = updateRoutingStateToStoppedListener(
+            task.getDeploymentId(),
+            NODE_IS_SHUTTING_DOWN,
+            routingStateListener
+        );
+
+        stopDeploymentAfterCompletingPendingWorkAsync(task, notifyDeploymentOfStopped);
+    }
+
+    private ActionListener updateRoutingStateToStoppedListener(
+        String deploymentId,
+        String reason,
+        ActionListener listener
+    ) {
+        final RoutingInfoUpdate updateToStopped = RoutingInfoUpdate.updateStateAndReason(
+            new RoutingStateAndReason(RoutingState.STOPPED, reason)
+        );
+
+        return ActionListener.wrap(_void -> {
+            logger.debug(() -> format("[%s] Updating routing state to stopped", deploymentId));
+
updateStoredState(deploymentId, updateToStopped, listener); + }, e -> { + // if we failed to stop the process, something strange is going on, but we should set the routing state to stopped + logger.warn(() -> format("[%s] Failed to stop deployment due to error", deploymentId), e); + updateStoredState(deploymentId, updateToStopped, listener); + }); + } + + private void stopUnreferencedDeployment(String deploymentId, String currentNode) { + // This model is not routed to the current node at all + TrainedModelDeploymentTask task = deploymentIdToTask.remove(deploymentId); + if (task == null) { + return; + } + + logger.debug(() -> format("[%s] Stopping unreferenced deployment for node %s", deploymentId, currentNode)); + stopDeploymentAsync( + task, + NODE_NO_LONGER_REFERENCED, + ActionListener.wrap( + r -> logger.trace(() -> "[" + task.getDeploymentId() + "] stopped deployment"), + e -> logger.warn(() -> "[" + task.getDeploymentId() + "] failed to fully stop deployment", e) + ) + ); + } + + private void stopDeploymentAsync(TrainedModelDeploymentTask task, String reason, ActionListener listener) { + stopDeploymentHelper(task, reason, deploymentManager::stopDeployment, listener); + } + + private void stopDeploymentHelper( + TrainedModelDeploymentTask task, + String reason, + Consumer stopDeploymentFunc, + ActionListener listener + ) { + if (stopped) { + return; + } + task.markAsStopped(reason); + + threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(() -> { + try { + stopDeploymentFunc.accept(task); + taskManager.unregister(task); + deploymentIdToTask.remove(task.getDeploymentId()); + listener.onResponse(null); + } catch (Exception e) { + listener.onFailure(e); + } + }); + } + + private void stopDeploymentAfterCompletingPendingWorkAsync(TrainedModelDeploymentTask task, ActionListener listener) { + stopDeploymentHelper(task, NODE_IS_SHUTTING_DOWN, deploymentManager::stopAfterCompletingPendingWork, listener); } private void updateNumberOfAllocations(TrainedModelAssignmentMetadata assignments) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentUtils.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentUtils.java new file mode 100644 index 0000000000000..257c944c08605 --- /dev/null +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentUtils.java @@ -0,0 +1,28 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ml.inference.assignment; + +import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; +import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfoUpdate; +import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; +import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingStateAndReason; + +public class TrainedModelAssignmentUtils { + public static final String NODES_CHANGED_REASON = "nodes changed"; + public static final String NODE_IS_SHUTTING_DOWN = "node is shutting down"; + + public static RoutingInfo createShuttingDownRoute(RoutingInfo existingRoute) { + RoutingInfoUpdate routeUpdate = RoutingInfoUpdate.updateStateAndReason( + new RoutingStateAndReason(RoutingState.STOPPING, NODE_IS_SHUTTING_DOWN) + ); + + return routeUpdate.apply(existingRoute); + } + + private TrainedModelAssignmentUtils() {} +} diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/AbstractPyTorchAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/AbstractPyTorchAction.java index 1a3d8e2b06120..64d3808e9ed8f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/AbstractPyTorchAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/AbstractPyTorchAction.java @@ -56,7 +56,11 @@ protected AbstractPyTorchAction( @Override public final void init() { if (this.timeoutHandler == null) { - this.timeoutHandler = threadPool.schedule(this::onTimeout, timeout, MachineLearning.UTILITY_THREAD_POOL_NAME); + this.timeoutHandler = threadPool.schedule( + this::onTimeout, + timeout, + threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME) + ); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java index 2cc774bff2823..21bd66e6f35ea 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/DeploymentManager.java @@ -16,6 +16,7 @@ import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.support.master.AcknowledgedResponse; import org.elasticsearch.client.internal.Client; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.core.TimeValue; @@ -57,6 +58,8 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Consumer; @@ -151,7 +154,7 @@ public void startDeployment(TrainedModelDeploymentTask task, ActionListener failedDeploymentListener = ActionListener.wrap(finalListener::onResponse, failure -> { ProcessContext failedContext = processContextByAllocation.remove(task.getId()); if (failedContext != null) { - failedContext.stopProcess(); + failedContext.forcefullyStopProcess(); } finalListener.onFailure(failure); }); @@ -244,12 +247,26 @@ public void stopDeployment(TrainedModelDeploymentTask task) { ProcessContext 
processContext = processContextByAllocation.remove(task.getId()); if (processContext != null) { logger.info("[{}] Stopping deployment, reason [{}]", task.getDeploymentId(), task.stoppedReason().orElse("unknown")); - processContext.stopProcess(); + processContext.forcefullyStopProcess(); } else { logger.warn("[{}] No process context to stop", task.getDeploymentId()); } } + public void stopAfterCompletingPendingWork(TrainedModelDeploymentTask task) { + ProcessContext processContext = processContextByAllocation.remove(task.getId()); + if (processContext != null) { + logger.info( + "[{}] Stopping deployment after completing pending tasks, reason [{}]", + task.getDeploymentId(), + task.stoppedReason().orElse("unknown") + ); + processContext.stopProcessAfterCompletingPendingWork(); + } else { + logger.warn("[{}] No process context to stop gracefully", task.getDeploymentId()); + } + } + public void infer( TrainedModelDeploymentTask task, InferenceConfig config, @@ -369,6 +386,7 @@ private ProcessContext getProcessContext(TrainedModelDeploymentTask task, Consum class ProcessContext { + private static final String PROCESS_NAME = "inference process"; private final TrainedModelDeploymentTask task; private final SetOnce process = new SetOnce<>(); private final SetOnce nlpTaskProcessor = new SetOnce<>(); @@ -383,6 +401,8 @@ class ProcessContext { private final AtomicInteger timeoutCount = new AtomicInteger(); private volatile boolean isStopped; + private static final TimeValue COMPLETION_TIMEOUT = TimeValue.timeValueMinutes(3); + ProcessContext(TrainedModelDeploymentTask task) { this.task = Objects.requireNonNull(task); resultProcessor = new PyTorchResultProcessor(task.getDeploymentId(), threadSettings -> { @@ -397,7 +417,7 @@ class ProcessContext { this.stateStreamer = new PyTorchStateStreamer(client, executorServiceForProcess, xContentRegistry); this.priorityProcessWorker = new PriorityProcessWorkerExecutorService( threadPool.getThreadContext(), - "inference process", + PROCESS_NAME, task.getParams().getQueueCapacity() ); } @@ -442,22 +462,32 @@ void startPriorityProcessWorker() { executorServiceForProcess.submit(priorityProcessWorker::start); } - synchronized void stopProcess() { - isStopped = true; - resultProcessor.stop(); - stateStreamer.cancel(); + synchronized void forcefullyStopProcess() { + logger.debug(() -> format("[%s] Forcefully stopping process", task.getDeploymentId())); + prepareInternalStateForShutdown(); if (priorityProcessWorker.isShutdown()) { // most likely there was a crash or exception that caused the // thread to stop. 
Notify any waiting requests in the work queue - priorityProcessWorker.notifyQueueRunnables(); + handleAlreadyShuttingDownWorker(); } else { priorityProcessWorker.shutdown(); } + killProcessIfPresent(); - if (nlpTaskProcessor.get() != null) { - nlpTaskProcessor.get().close(); - } + closeNlpTaskProcessor(); + } + + private void prepareInternalStateForShutdown() { + isStopped = true; + resultProcessor.stop(); + stateStreamer.cancel(); + } + + private void handleAlreadyShuttingDownWorker() { + logger.debug(() -> format("[%s] Process worker was already marked for shutdown", task.getDeploymentId())); + + priorityProcessWorker.notifyQueueRunnables(); } private void killProcessIfPresent() { @@ -471,13 +501,83 @@ private void killProcessIfPresent() { } } + private void closeNlpTaskProcessor() { + if (nlpTaskProcessor.get() != null) { + nlpTaskProcessor.get().close(); + } + } + + private synchronized void stopProcessAfterCompletingPendingWork() { + logger.debug(() -> format("[%s] Stopping process after completing its pending work", task.getDeploymentId())); + prepareInternalStateForShutdown(); + + if (priorityProcessWorker.isShutdown()) { + // most likely there was a crash or exception that caused the + // thread to stop. Notify any waiting requests in the work queue + handleAlreadyShuttingDownWorker(); + } else { + signalAndWaitForWorkerTermination(); + } + + stopProcessGracefully(); + closeNlpTaskProcessor(); + } + + private void signalAndWaitForWorkerTermination() { + try { + awaitTerminationAfterCompletingWork(); + } catch (TimeoutException e) { + logger.warn(format("[%s] Timed out waiting for process worker to complete, forcing a shutdown", task.getDeploymentId()), e); + // The process failed to stop in the time period allotted, so we'll mark it for shut down + priorityProcessWorker.shutdown(); + priorityProcessWorker.notifyQueueRunnables(); + } + } + + private void awaitTerminationAfterCompletingWork() throws TimeoutException { + try { + priorityProcessWorker.shutdown(); + + if (priorityProcessWorker.awaitTermination(COMPLETION_TIMEOUT.getMinutes(), TimeUnit.MINUTES) == false) { + throw new TimeoutException( + Strings.format("Timed out waiting for process worker to complete for process %s", PROCESS_NAME) + ); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.info(Strings.format("[%s] Interrupted waiting for process worker to complete", PROCESS_NAME)); + } + } + + private void stopProcessGracefully() { + try { + closeProcessIfPresent(); + resultProcessor.awaitCompletion(COMPLETION_TIMEOUT.getMinutes(), TimeUnit.MINUTES); + } catch (TimeoutException e) { + logger.warn(format("[%s] Timed out waiting for results processor to stop", task.getDeploymentId()), e); + } + } + + private void closeProcessIfPresent() { + try { + if (process.get() == null) { + return; + } + + process.get().close(); + } catch (IOException e) { + logger.error(format("[%s] Failed to stop process gracefully, attempting to kill it", task.getDeploymentId()), e); + killProcessIfPresent(); + } + } + private void onProcessCrash(String reason) { logger.error("[{}] inference process crashed due to reason [{}]", task.getDeploymentId(), reason); processContextByAllocation.remove(task.getId()); isStopped = true; resultProcessor.stop(); stateStreamer.cancel(); - priorityProcessWorker.shutdownWithError(new IllegalStateException(reason)); + priorityProcessWorker.shutdownNowWithError(new IllegalStateException(reason)); if (nlpTaskProcessor.get() != null) { nlpTaskProcessor.get().close(); } diff --git 
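
/*
 * The graceful path in ProcessContext above follows a standard signal/await/force
 * shape. A minimal sketch with a plain ExecutorService (the real code waits on its
 * own worker and result processor, with a 3-minute deadline):
 */
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

class AwaitThenForceSketch {
    static void stopAfterCompletingPendingWork(ExecutorService worker) throws InterruptedException {
        worker.shutdown(); // stop accepting new work; the existing queue keeps draining
        if (worker.awaitTermination(3, TimeUnit.MINUTES) == false) {
            worker.shutdownNow(); // deadline passed: abandon the queue and interrupt
        }
    }

    public static void main(String[] args) throws InterruptedException {
        ExecutorService worker = Executors.newSingleThreadExecutor();
        worker.submit(() -> System.out.println("pending inference request completes first"));
        stopAfterCompletingPendingWork(worker);
    }
}
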
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTask.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTask.java index 85e0feb71b704..025d0ecca00d2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTask.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTask.java @@ -104,8 +104,13 @@ public TaskParams getParams() { return params; } - public void stop(String reason, ActionListener listener) { - trainedModelAssignmentNodeService.stopDeploymentAndNotify(this, reason, listener); + public void stop(String reason, boolean finishPendingWork, ActionListener listener) { + + if (finishPendingWork) { + trainedModelAssignmentNodeService.gracefullyStopDeploymentAndNotify(this, reason, listener); + } else { + trainedModelAssignmentNodeService.stopDeploymentAndNotify(this, reason, listener); + } } public void markAsStopped(String reason) { @@ -130,6 +135,7 @@ protected void onCancelled() { logger.info("[{}] task cancelled due to reason [{}]", getDeploymentId(), reason); stop( reason, + true, ActionListener.wrap( acknowledgedResponse -> {}, e -> logger.error(() -> "[" + getDeploymentId() + "] error stopping the deployment after task cancellation", e) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java index 9e545cf57cee5..11b699df66b83 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/nlp/Vocabulary.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.ml.inference.nlp; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -62,15 +62,15 @@ public Vocabulary(List vocab, String modelId, @Nullable List mer } public Vocabulary(StreamInput in) throws IOException { - vocab = in.readStringList(); + vocab = in.readStringCollectionAsList(); modelId = in.readString(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { - merges = in.readStringList(); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) { + merges = in.readStringCollectionAsList(); } else { merges = List.of(); } - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { - scores = in.readList(StreamInput::readDouble); + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { + scores = in.readCollectionAsList(StreamInput::readDouble); } else { scores = List.of(); } @@ -92,10 +92,10 @@ public List scores() { public void writeTo(StreamOutput out) throws IOException { out.writeStringCollection(vocab); out.writeString(modelId); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_2_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_2_0)) { out.writeStringCollection(merges); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_010)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeCollection(scores, StreamOutput::writeDouble); } } diff --git 
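
/*
 * The PyTorchResultProcessor hunk below uses a one-shot CountDownLatch so that a
 * stopping deployment can wait for the results loop to finish. A minimal sketch of
 * that pattern, with simplified stand-in methods:
 */
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

class CompletionLatchSketch {
    private final CountDownLatch processorCompletionLatch = new CountDownLatch(1);

    void processLoop() {
        try {
            // ... consume results until the process's output stream ends ...
        } finally {
            processorCompletionLatch.countDown(); // always release waiters, even on error
        }
    }

    void awaitCompletion(long timeout, TimeUnit unit) throws TimeoutException {
        try {
            if (processorCompletionLatch.await(timeout, unit) == false) {
                throw new TimeoutException("results processor did not complete in time");
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // preserve the interrupt for callers
        }
    }
}
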
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java index 0c59b1fd224ff..6b92a9349c4ea 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessor.java @@ -24,6 +24,9 @@ import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.function.Consumer; import java.util.function.LongSupplier; @@ -64,6 +67,7 @@ public record ResultStats( private long lastResultTimeMs; private final long startTime; private final LongSupplier currentTimeMsSupplier; + private final CountDownLatch processorCompletionLatch = new CountDownLatch(1); public PyTorchResultProcessor(String modelId, Consumer threadSettingsConsumer) { this(modelId, threadSettingsConsumer, System::currentTimeMillis); @@ -129,6 +133,7 @@ public void process(PyTorchProcess process) { notifyAndClearPendingResults(errorResult); } finally { notifyAndClearPendingResults(new ErrorResult("inference canceled as process is stopping")); + processorCompletionLatch.countDown(); } logger.debug(() -> "[" + modelId + "] Results processing finished"); } @@ -286,6 +291,24 @@ public void stop() { isStopping = true; } + /** + * Waits for specified amount of time for the processor to complete. + * + * @param timeout the maximum time to wait + * @param unit the time unit of the timeout + * @throws TimeoutException if the results processor has not completed after exceeding the timeout period + */ + public void awaitCompletion(long timeout, TimeUnit unit) throws TimeoutException { + try { + if (processorCompletionLatch.await(timeout, unit) == false) { + throw new TimeoutException(format("Timed out waiting for pytorch results processor to complete for model id %s", modelId)); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + logger.info(format("[%s] Interrupted waiting for pytorch results processor to complete", modelId)); + } + } + public static class PendingResult { public final ActionListener listener; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/UpdateJobProcessNotifier.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/UpdateJobProcessNotifier.java index 19a0a177f3ef9..46c9ae029e60f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/UpdateJobProcessNotifier.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/UpdateJobProcessNotifier.java @@ -79,7 +79,7 @@ boolean submitJobUpdate(UpdateParams update, ActionListener listener) { } private void start() { - cancellable = threadPool.scheduleWithFixedDelay(this::processNextUpdate, TimeValue.timeValueSeconds(1), ThreadPool.Names.GENERIC); + cancellable = threadPool.scheduleWithFixedDelay(this::processNextUpdate, TimeValue.timeValueSeconds(1), threadPool.generic()); } private void stop() { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/AbstractProcessWorkerExecutorService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/AbstractProcessWorkerExecutorService.java index 
a7517069dfd2e..dee608e69f5bb 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/AbstractProcessWorkerExecutorService.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/AbstractProcessWorkerExecutorService.java
@@ -22,6 +22,7 @@
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.Function;
@@ -39,8 +40,8 @@ public abstract class AbstractProcessWorkerExecutorService e
     private final CountDownLatch awaitTermination = new CountDownLatch(1);
     protected final BlockingQueue queue;
     private final AtomicReference error = new AtomicReference<>();
-
-    private volatile boolean running = true;
+    private final AtomicBoolean running = new AtomicBoolean(true);
+    private final AtomicBoolean shouldShutdownAfterCompletingWork = new AtomicBoolean(false);

     /**
      * @param contextHolder the thread context holder
@@ -65,24 +66,30 @@ public int queueSize() {
         return queue.size();
     }

-    public void shutdownWithError(Exception e) {
+    public void shutdownNowWithError(Exception e) {
         error.set(e);
-        shutdown();
+        shutdownNow();
     }

     @Override
     public void shutdown() {
-        running = false;
+        shouldShutdownAfterCompletingWork.set(true);
     }

+    /**
+     * Note that some of the tasks in the returned list may already have run, since tasks can execute while the queue
+     * is being copied.
+     *
+     * @return a best-effort list of tasks that have not been run
+     */
     @Override
     public List shutdownNow() {
-        throw new UnsupportedOperationException("not supported");
+        running.set(false);
+        return new ArrayList<>(queue);
     }

     @Override
     public boolean isShutdown() {
-        return running == false;
+        return running.get() == false || shouldShutdownAfterCompletingWork.get();
     }

     @Override
@@ -97,7 +104,7 @@ public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedE

     public void start() {
         try {
-            while (running) {
+            while (running.get()) {
                 Runnable runnable = queue.poll(500, TimeUnit.MILLISECONDS);
                 if (runnable != null) {
                     try {
@@ -106,6 +113,8 @@ public void start() {
                         logger.error(() -> "error handling process [" + processName + "] operation", e);
                     }
                     EsExecutors.rethrowErrors(ThreadContext.unwrap(runnable));
+                } else if (shouldShutdownAfterCompletingWork.get()) {
+                    running.set(false);
                 }
             }
diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java
index 1136f4c083cc0..550742ef04b5a 100644
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java
+++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicator.java
@@ -179,7 +179,7 @@ public void close() {
         });
         try {
             future.get();
-            autodetectWorkerExecutor.shutdown();
+            autodetectWorkerExecutor.shutdownNow();
             dataCountsReporter.writeUnreportedCounts();
         } catch (InterruptedException e) {
             Thread.currentThread().interrupt();
@@ -203,7 +203,7 @@ public void killProcess(boolean awaitCompletion, boolean finish, boolean finaliz
         try {
             processKilled = true;
             autodetectResultProcessor.setProcessKilled();
-            autodetectWorkerExecutor.shutdown();
+            autodetectWorkerExecutor.shutdownNow();
             autodetectProcess.kill(awaitCompletion);

             if (awaitCompletion) {
diff --git
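
/*
 * The two-flag worker from the AbstractProcessWorkerExecutorService hunk above, as a
 * self-contained sketch: shutdown() lets the loop finish the queued work before it
 * exits, while shutdownNow() stops it immediately and returns the leftover tasks.
 */
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;

class DrainingWorkerSketch {
    private final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();
    private final AtomicBoolean running = new AtomicBoolean(true);
    private final AtomicBoolean shutdownAfterCompletingWork = new AtomicBoolean(false);

    void start() throws InterruptedException {
        while (running.get()) {
            Runnable runnable = queue.poll(500, TimeUnit.MILLISECONDS);
            if (runnable != null) {
                runnable.run();
            } else if (shutdownAfterCompletingWork.get()) {
                running.set(false); // queue drained and a graceful stop was requested
            }
        }
    }

    void execute(Runnable command) {
        queue.add(command);
    }

    void shutdown() {
        shutdownAfterCompletingWork.set(true);
    }

    List<Runnable> shutdownNow() {
        running.set(false);
        return new ArrayList<>(queue); // best-effort snapshot of tasks that did not run
    }
}
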
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/JobModelSnapshotUpgrader.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/JobModelSnapshotUpgrader.java index 9cb1df0baddef..d69acab30451a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/JobModelSnapshotUpgrader.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/JobModelSnapshotUpgrader.java @@ -384,7 +384,7 @@ void shutdown(Exception e) { if (process.isProcessAlive() == false) { logger.debug("[{}] [{}] process is dead, no need to shutdown", jobId, snapshotId); onFinish.accept(e); - autodetectWorkerExecutor.shutdown(); + autodetectWorkerExecutor.shutdownNow(); stateStreamer.cancel(); return; } @@ -408,7 +408,7 @@ void shutdown(Exception e) { }); try { future.get(); - autodetectWorkerExecutor.shutdown(); + autodetectWorkerExecutor.shutdownNow(); } catch (InterruptedException interrupt) { Thread.currentThread().interrupt(); } catch (ExecutionException executionException) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/results/AutodetectResult.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/results/AutodetectResult.java index 4099ede5ef1d1..37f34565c1958 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/results/AutodetectResult.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/results/AutodetectResult.java @@ -139,12 +139,12 @@ public AutodetectResult(StreamInput in) throws IOException { this.bucket = null; } if (in.readBoolean()) { - this.records = in.readList(AnomalyRecord::new); + this.records = in.readCollectionAsList(AnomalyRecord::new); } else { this.records = null; } if (in.readBoolean()) { - this.influencers = in.readList(Influencer::new); + this.influencers = in.readCollectionAsList(Influencer::new); } else { this.influencers = null; } @@ -225,7 +225,7 @@ private static void writeNullable(List writeables, StreamOu boolean isPresent = writeables != null; out.writeBoolean(isPresent); if (isPresent) { - out.writeList(writeables); + out.writeCollection(writeables); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java index 7a027e2c5cfa4..b19c0fb670a59 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java @@ -13,6 +13,7 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.ResourceNotFoundException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.RetryableAction; import org.elasticsearch.client.internal.Client; @@ -83,7 +84,7 @@ public class OpenJobPersistentTasksExecutor extends AbstractJobPersistentTasksEx // Resuming a job with a running datafeed from its current snapshot was added in 7.11 and // can only be done if the master node is on or after that version. 
- private static final TransportVersion MIN_TRANSPORT_VERSION_FOR_REVERTING_TO_CURRENT_SNAPSHOT = TransportVersion.V_7_11_0; + private static final TransportVersion MIN_TRANSPORT_VERSION_FOR_REVERTING_TO_CURRENT_SNAPSHOT = TransportVersions.V_7_11_0; public static String[] indicesOfInterest(String resultsIndex) { if (resultsIndex == null) { @@ -485,7 +486,7 @@ private RevertToCurrentSnapshotAction(JobTask jobTask, ActionListener l // to be available so that and data deletion can succeed. TimeValue.timeValueMinutes(15), listener, - MachineLearning.UTILITY_THREAD_POOL_NAME + client.threadPool().executor(MachineLearning.UTILITY_THREAD_POOL_NAME) ); this.jobTask = Objects.requireNonNull(jobTask); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java index b749bb74dfbd8..2d74b1b34888f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/queries/TextExpansionQueryBuilder.java @@ -10,6 +10,7 @@ import org.apache.lucene.search.Query; import org.apache.lucene.util.SetOnce; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.io.stream.StreamInput; @@ -89,7 +90,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_8_0; + return TransportVersions.V_8_8_0; } @Override diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestStopTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestStopTrainedModelDeploymentAction.java index 36b8989e61ad8..f5c6144daf74a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestStopTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/rest/inference/RestStopTrainedModelDeploymentAction.java @@ -55,6 +55,12 @@ protected RestChannelConsumer prepareRequest(RestRequest restRequest, NodeClient request.setForce( restRequest.paramAsBoolean(StopTrainedModelDeploymentAction.Request.FORCE.getPreferredName(), request.isForce()) ); + request.setFinishPendingWork( + restRequest.paramAsBoolean( + StopTrainedModelDeploymentAction.Request.FINISH_PENDING_WORK.getPreferredName(), + request.shouldFinishPendingWork() + ) + ); } return channel -> client.execute(StopTrainedModelDeploymentAction.INSTANCE, request, new RestToXContentListener<>(channel)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterService.java index de418364732e0..ddcfcb7de08d4 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterService.java @@ -469,7 +469,7 @@ private abstract class MlRetryableAction extends RetryableAct TimeValue.timeValueMillis(MIN_RETRY_SLEEP_MILLIS), TimeValue.MAX_VALUE, listener, - UTILITY_THREAD_POOL_NAME + threadPool.executor(UTILITY_THREAD_POOL_NAME) ); this.jobId = jobId; this.shouldRetry = 
shouldRetry; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/vectors/TextEmbeddingQueryVectorBuilder.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/vectors/TextEmbeddingQueryVectorBuilder.java index b5bfa4611ea4d..2dd76c8fab7cc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/vectors/TextEmbeddingQueryVectorBuilder.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/vectors/TextEmbeddingQueryVectorBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.ml.vectors; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.io.stream.StreamInput; @@ -71,7 +72,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_7_0; + return TransportVersions.V_8_7_0; } @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java index dcd490aeb7ba3..d4b58e0da2fbe 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java @@ -281,6 +281,8 @@ public void testNlpOnly() throws IOException { public static class MlTestExtension implements MachineLearningExtension { + public static final String[] ANALYTICS_DEST_INDEX_ALLOWED_SETTINGS = {}; + private final boolean useIlm; private final boolean includeNodeInfo; private final boolean isAnomalyDetectionEnabled; @@ -325,6 +327,11 @@ public boolean isDataFrameAnalyticsEnabled() { public boolean isNlpEnabled() { return isNlpEnabled; } + + @Override + public String[] getAnalyticsDestIndexAllowedSettings() { + return ANALYTICS_DEST_INDEX_ALLOWED_SETTINGS; + } } public static class MlTestExtensionLoader implements ExtensiblePlugin.ExtensionLoader { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java index 8fd67af397568..2f30d131021b4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlInitializationServiceTests.java @@ -15,18 +15,16 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; import org.junit.Before; import java.util.Map; -import java.util.concurrent.ExecutorService; import static org.hamcrest.Matchers.is; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -36,29 +34,18 @@ public class MlInitializationServiceTests extends ESTestCase { private static final ClusterName CLUSTER_NAME = new ClusterName("my_cluster"); private ThreadPool threadPool; - private ExecutorService executorService; private ClusterService 
clusterService; private Client client; private MlAssignmentNotifier mlAssignmentNotifier; @Before public void setUpMocks() { - threadPool = mock(ThreadPool.class); - executorService = mock(ExecutorService.class); + final var deterministicTaskQueue = new DeterministicTaskQueue(); + threadPool = deterministicTaskQueue.getThreadPool(); clusterService = mock(ClusterService.class); client = mock(Client.class); mlAssignmentNotifier = mock(MlAssignmentNotifier.class); - doAnswer(invocation -> { - ((Runnable) invocation.getArguments()[0]).run(); - return null; - }).when(executorService).execute(any(Runnable.class)); - when(threadPool.executor(ThreadPool.Names.GENERIC)).thenReturn(executorService); - when(threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME)).thenReturn(executorService); - - Scheduler.ScheduledCancellable scheduledCancellable = mock(Scheduler.ScheduledCancellable.class); - when(threadPool.schedule(any(), any(), any())).thenReturn(scheduledCancellable); - when(clusterService.getClusterName()).thenReturn(CLUSTER_NAME); @SuppressWarnings("unchecked") diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java index d712b24223a2a..f6c5924db37f8 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ml; +import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNodeRole; @@ -21,8 +22,12 @@ import org.elasticsearch.xpack.core.ml.action.OpenJobAction; import org.elasticsearch.xpack.core.ml.action.StartDataFrameAnalyticsAction; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; +import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentTaskParamsTests; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsTaskState; +import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; +import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; +import org.elasticsearch.xpack.core.ml.inference.assignment.TrainedModelAssignment; import org.elasticsearch.xpack.core.ml.job.config.JobState; import org.elasticsearch.xpack.core.ml.job.config.JobTaskState; import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeState; @@ -30,6 +35,7 @@ import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeTaskState; import org.elasticsearch.xpack.ml.datafeed.DatafeedRunner; import org.elasticsearch.xpack.ml.dataframe.DataFrameAnalyticsManager; +import org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentMetadata; import org.elasticsearch.xpack.ml.job.process.autodetect.AutodetectProcessManager; import org.elasticsearch.xpack.ml.process.MlController; import org.elasticsearch.xpack.ml.process.MlMemoryTracker; @@ -175,6 +181,88 @@ public void testIsNodeSafeToShutdownGivenFailedTasks() { assertThat(isNodeSafeToShutdown("node-4", clusterState, shutdownStartTime, clock), is(true)); // has no ML tasks } + public void testIsNodeSafeToShutdownReturnsFalseWhenStartingDeploymentExists() { + String nodeId = "node-1"; + ClusterState currentState = ClusterState.builder(new 
ClusterName("test")) + .metadata( + Metadata.builder() + .putCustom( + TrainedModelAssignmentMetadata.NAME, + TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + "1", + TrainedModelAssignment.Builder.empty(StartTrainedModelDeploymentTaskParamsTests.createRandom()) + .addRoutingEntry(nodeId, new RoutingInfo(1, 1, RoutingState.STARTING, "")) + ) + .build() + ) + .build() + ) + .build(); + + Clock clock = Clock.fixed(Instant.now(), ZoneId.systemDefault()); + + assertFalse(isNodeSafeToShutdown("node-1", currentState, null, clock)); + } + + public void testIsNodeSafeToShutdownReturnsFalseWhenStoppingAndStoppedDeploymentsExist() { + String nodeId = "node-1"; + ClusterState currentState = ClusterState.builder(new ClusterName("test")) + .metadata( + Metadata.builder() + .putCustom( + TrainedModelAssignmentMetadata.NAME, + TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + "1", + TrainedModelAssignment.Builder.empty(StartTrainedModelDeploymentTaskParamsTests.createRandom()) + .addRoutingEntry(nodeId, new RoutingInfo(1, 1, RoutingState.STOPPED, "")) + ) + .addNewAssignment( + "2", + TrainedModelAssignment.Builder.empty(StartTrainedModelDeploymentTaskParamsTests.createRandom()) + .addRoutingEntry(nodeId, new RoutingInfo(1, 1, RoutingState.STOPPING, "")) + ) + .build() + ) + .build() + ) + .build(); + + Clock clock = Clock.fixed(Instant.now(), ZoneId.systemDefault()); + + assertFalse(isNodeSafeToShutdown("node-1", currentState, null, clock)); + } + + public void testIsNodeSafeToShutdownReturnsTrueWhenStoppedDeploymentsExist() { + String nodeId = "node-1"; + ClusterState currentState = ClusterState.builder(new ClusterName("test")) + .metadata( + Metadata.builder() + .putCustom( + TrainedModelAssignmentMetadata.NAME, + TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + "1", + TrainedModelAssignment.Builder.empty(StartTrainedModelDeploymentTaskParamsTests.createRandom()) + .addRoutingEntry(nodeId, new RoutingInfo(1, 1, RoutingState.STOPPED, "")) + ) + .addNewAssignment( + "2", + TrainedModelAssignment.Builder.empty(StartTrainedModelDeploymentTaskParamsTests.createRandom()) + .addRoutingEntry(nodeId, new RoutingInfo(1, 1, RoutingState.STOPPED, "")) + ) + .build() + ) + .build() + ) + .build(); + + Clock clock = Clock.fixed(Instant.now(), ZoneId.systemDefault()); + + assertThat(isNodeSafeToShutdown("node-1", currentState, null, clock), is(true)); + } + public void testSignalGracefulShutdownIncludingLocalNode() { MlLifeCycleService mlLifeCycleService = new MlLifeCycleService( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java index a7e54c8957418..9151a88ef482d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlSingleNodeTestCase.java @@ -46,6 +46,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; @@ -165,7 +166,7 @@ protected static ThreadPool mockThreadPool() { doAnswer(invocationOnMock -> { ((Runnable) invocationOnMock.getArguments()[0]).run(); return null; - }).when(tp).schedule(any(Runnable.class), any(TimeValue.class), any(String.class)); + }).when(tp).schedule(any(Runnable.class), 
any(TimeValue.class), any(Executor.class)); return tp; } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionStoreTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionStoreTests.java index c96864bba5840..7ce06de0b0fbe 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionStoreTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/TransactionStoreTests.java @@ -11,6 +11,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.common.breaker.CircuitBreaker; import org.elasticsearch.common.breaker.CircuitBreakingException; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.LongArray; @@ -141,7 +142,7 @@ public void testDontLeakAfterDeserializing() throws IOException { storeCopy = copyInstance( store, writableRegistry(), - (out, value) -> value.writeTo(out), + StreamOutput::writeWriteable, in -> new HashBasedTransactionStore(in, mockBigArraysWithThrowingCircuitBreaker()), TransportVersion.current() ); @@ -165,7 +166,7 @@ public void testCreateImmutableTransactionStore() throws IOException { HashBasedTransactionStore storeCopy = copyInstance( store, writableRegistry(), - (out, value) -> value.writeTo(out), + StreamOutput::writeWriteable, in -> new HashBasedTransactionStore(in, mockBigArrays()), TransportVersion.current() ); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/InternalItemSetMapReduceAggregationTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/InternalItemSetMapReduceAggregationTests.java index 2e6d1e1d073ca..93ee8bec974a7 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/InternalItemSetMapReduceAggregationTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/InternalItemSetMapReduceAggregationTests.java @@ -77,7 +77,7 @@ static class WordCounts implements ToXContent, Writeable, Closeable { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeMap(frequencies, StreamOutput::writeString, StreamOutput::writeLong); + out.writeMap(frequencies, StreamOutput::writeLong); } @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/ItemSetMapReduceValueSourceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/ItemSetMapReduceValueSourceTests.java index b9935469267bd..8176f9aac21cb 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/ItemSetMapReduceValueSourceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/frequentitemsets/mr/ItemSetMapReduceValueSourceTests.java @@ -11,6 +11,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.TransportVersion; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.network.InetAddresses; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.time.DateFormatter; @@ -90,6 +91,6 @@ public void testLong() throws IOException { } private Field copyField(Field field) throws IOException { - return 
copyInstance(field, writableRegistry(), (out, value) -> value.writeTo(out), Field::new, TransportVersion.current()); + return copyInstance(field, writableRegistry(), StreamOutput::writeWriteable, Field::new, TransportVersion.current()); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/heuristic/PValueScoreTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/heuristic/PValueScoreTests.java index f5291bd1b03ed..17898d7205b66 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/heuristic/PValueScoreTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/aggs/heuristic/PValueScoreTests.java @@ -9,6 +9,7 @@ import org.apache.commons.math3.util.FastMath; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.search.SearchModule; @@ -32,7 +33,7 @@ public class PValueScoreTests extends AbstractNXYSignificanceHeuristicTestCase { @Override protected TransportVersion randomVersion() { - return randomFrom(TransportVersion.V_8_0_0, TransportVersion.V_7_16_0); + return randomFrom(TransportVersions.V_8_0_0, TransportVersions.V_7_16_0); } @Override diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunnerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunnerTests.java index 46ca91ea9a5c1..284ad48a9b3a4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunnerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunnerTests.java @@ -47,6 +47,7 @@ import java.net.InetAddress; import java.util.Collections; import java.util.Date; +import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicBoolean; @@ -167,7 +168,7 @@ public void testLookbackOnly_WarnsWhenNoDataIsRetrieved() throws Exception { datafeedRunner.run(task, handler); verify(threadPool, times(1)).executor(MachineLearning.DATAFEED_THREAD_POOL_NAME); - verify(threadPool, never()).schedule(any(), any(), any()); + verify(threadPool, never()).schedule(any(), any(), any(Executor.class)); verify(auditor).warning(JOB_ID, "Datafeed lookback retrieved no data"); } @@ -178,7 +179,7 @@ public void testStart_GivenNewlyCreatedJobLookback() throws Exception { datafeedRunner.run(task, handler); verify(threadPool, times(1)).executor(MachineLearning.DATAFEED_THREAD_POOL_NAME); - verify(threadPool, never()).schedule(any(), any(), any()); + verify(threadPool, never()).schedule(any(), any(), any(Executor.class)); } public void testStart_extractionProblem() throws Exception { @@ -188,7 +189,7 @@ public void testStart_extractionProblem() throws Exception { datafeedRunner.run(task, handler); verify(threadPool, times(1)).executor(MachineLearning.DATAFEED_THREAD_POOL_NAME); - verify(threadPool, never()).schedule(any(), any(), any()); + verify(threadPool, never()).schedule(any(), any(), any(Executor.class)); verify(auditor, times(1)).error(eq(JOB_ID), anyString()); } @@ -202,7 +203,7 @@ public void testStart_emptyDataCountException() throws Exception { r.run(); } return mock(Scheduler.ScheduledCancellable.class); - }).when(threadPool).schedule(any(), any(), any()); + }).when(threadPool).schedule(any(), any(), any(Executor.class)); 
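+ // The schedule(...) stub above runs the captured runnable inline and hands back a mocked ScheduledCancellable, so the datafeed retry loop executes synchronously on the test thread.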
when(datafeedJob.runLookBack(anyLong(), anyLong())).thenThrow(new DatafeedJob.EmptyDataCountException(0L, false)); when(datafeedJob.runRealtime()).thenThrow(new DatafeedJob.EmptyDataCountException(0L, false)); @@ -211,7 +212,7 @@ public void testStart_emptyDataCountException() { DatafeedTask task = createDatafeedTask(DATAFEED_ID, 0L, null); datafeedRunner.run(task, handler); - verify(threadPool, times(11)).schedule(any(), any(), eq(MachineLearning.DATAFEED_THREAD_POOL_NAME)); + verify(threadPool, times(11)).schedule(any(), any(), any(Executor.class)); verify(auditor, times(1)).warning(eq(JOB_ID), anyString()); } @@ -261,13 +262,13 @@ public void testStart_GivenNewlyCreatedJobLookBackAndRealtime() throws Exception task = spyDatafeedTask(task); datafeedRunner.run(task, handler); - verify(threadPool, times(1)).executor(MachineLearning.DATAFEED_THREAD_POOL_NAME); + verify(threadPool, times(2)).executor(MachineLearning.DATAFEED_THREAD_POOL_NAME); if (cancelled) { task.stop("test", StopDatafeedAction.DEFAULT_TIMEOUT); verify(handler).accept(null); assertThat(datafeedRunner.isRunning(task), is(false)); } else { - verify(threadPool, times(1)).schedule(any(), eq(new TimeValue(1)), eq(MachineLearning.DATAFEED_THREAD_POOL_NAME)); + verify(threadPool, times(1)).schedule(any(), eq(new TimeValue(1)), any(Executor.class)); assertThat(datafeedRunner.isRunning(task), is(true)); } } @@ -307,7 +308,7 @@ public void testDatafeedTaskWaitsUntilJobIsOpened() { capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", jobOpenedCs.build(), anotherJobCs)); // Now it should run as the job state changed to OPENED - verify(threadPool, times(1)).executor(MachineLearning.DATAFEED_THREAD_POOL_NAME); + verify(threadPool, times(2)).executor(MachineLearning.DATAFEED_THREAD_POOL_NAME); } public void testDatafeedTaskWaitsUntilAutodetectCommunicatorIsOpen() { @@ -345,7 +346,7 @@ public void testDatafeedTaskWaitsUntilAutodetectCommunicatorIsOpen() { capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", cs, anotherJobCs)); // Now it should run as the autodetect communicator is open - verify(threadPool, times(1)).executor(MachineLearning.DATAFEED_THREAD_POOL_NAME); + verify(threadPool, times(2)).executor(MachineLearning.DATAFEED_THREAD_POOL_NAME); } public void testDatafeedTaskWaitsUntilJobIsNotStale() { @@ -383,7 +384,7 @@ public void testDatafeedTaskWaitsUntilJobIsNotStale() { capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", jobOpenedCs.build(), anotherJobCs)); // Now it should run as the job state changed to OPENED - verify(threadPool, times(1)).executor(MachineLearning.DATAFEED_THREAD_POOL_NAME); + verify(threadPool, times(2)).executor(MachineLearning.DATAFEED_THREAD_POOL_NAME); } public void testDatafeedTaskStopsBecauseJobFailedWhileOpening() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndexTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndexTests.java index 445149c9deb44..822b27400c419 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndexTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/DestinationIndexTests.java @@ -62,6 +62,7 @@ import static java.util.Collections.singletonMap; import static org.elasticsearch.common.xcontent.support.XContentMapValues.extractValue; +import static
org.elasticsearch.xpack.ml.DefaultMachineLearningExtension.ANALYTICS_DEST_INDEX_ALLOWED_SETTINGS; import static org.hamcrest.Matchers.arrayContaining; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; @@ -332,6 +333,7 @@ private Map testCreateDestinationIndex(DataFrameAnalysis analysi client, clock, config, + ANALYTICS_DEST_INDEX_ALLOWED_SETTINGS, ActionListener.wrap( response -> fail("should not succeed"), e -> assertThat(e.getMessage(), Matchers.matchesRegex(finalErrorMessage)) @@ -341,7 +343,13 @@ private Map testCreateDestinationIndex(DataFrameAnalysis analysi return null; } - DestinationIndex.createDestinationIndex(client, clock, config, ActionTestUtils.assertNoFailureListener(response -> {})); + DestinationIndex.createDestinationIndex( + client, + clock, + config, + ANALYTICS_DEST_INDEX_ALLOWED_SETTINGS, + ActionTestUtils.assertNoFailureListener(response -> {}) + ); GetSettingsRequest capturedGetSettingsRequest = getSettingsRequestCaptor.getValue(); assertThat(capturedGetSettingsRequest.indices(), equalTo(SOURCE_INDEX)); @@ -569,6 +577,7 @@ public void testCreateDestinationIndex_ResultsFieldsExistsInSourceIndex() { client, clock, config, + ANALYTICS_DEST_INDEX_ALLOWED_SETTINGS, ActionListener.wrap( response -> fail("should not succeed"), e -> assertThat( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameAnalyticsManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameAnalyticsManagerTests.java index 708fc7df78859..31194a1190368 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameAnalyticsManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/dataframe/process/DataFrameAnalyticsManagerTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.xpack.ml.notifications.DataFrameAnalyticsAuditor; import org.elasticsearch.xpack.ml.utils.persistence.ResultsPersisterService; +import static org.elasticsearch.xpack.ml.DefaultMachineLearningExtension.ANALYTICS_DEST_INDEX_ALLOWED_SETTINGS; import static org.hamcrest.Matchers.is; import static org.mockito.Mockito.mock; @@ -34,7 +35,8 @@ public void testNodeShuttingDown() { mock(DataFrameAnalyticsAuditor.class), mock(IndexNameExpressionResolver.class), mock(ResultsPersisterService.class), - mock(ModelLoadingService.class) + mock(ModelLoadingService.class), + ANALYTICS_DEST_INDEX_ALLOWED_SETTINGS ); assertThat(manager.isNodeShuttingDown(), is(false)); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java index 3471dc6a91958..9b79754a5afe9 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java @@ -394,6 +394,94 @@ public void testCreateAssignmentWhileResetModeIsTrue() throws InterruptedExcepti latch.await(); } + public void testHaveMlNodesChanged_ReturnsTrueWhenNodeShutsDownAndWasRoutedTo() { + String model1 = "model-1"; + String mlNode1 = "ml-node-with-room"; + String mlNode2 = "new-ml-node-with-room"; + + ClusterState stateWithOneNode = createClusterState( + List.of(mlNode1), + Metadata.builder() + .putCustom( + 
TrainedModelAssignmentMetadata.NAME, + TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + model1, + TrainedModelAssignment.Builder.empty(newParams(model1, 100)) + .addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, "")) + ) + .build() + ) + .putCustom(NodesShutdownMetadata.TYPE, shutdownMetadata(mlNode1)) + .build() + ); + + ClusterState stateWithTwoNodes = createClusterState( + List.of(mlNode1, mlNode2), + Metadata.builder() + .putCustom( + TrainedModelAssignmentMetadata.NAME, + TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + model1, + TrainedModelAssignment.Builder.empty(newParams(model1, 100)) + .addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, "")) + ) + .build() + ) + .build() + ); + + var shutdownEvent = new ClusterChangedEvent("test", stateWithOneNode, stateWithTwoNodes); + var metadata = TrainedModelAssignmentMetadata.fromState(shutdownEvent.state()); + + assertThat(TrainedModelAssignmentClusterService.haveMlNodesChanged(shutdownEvent, metadata), is(true)); + } + + public void testHaveMlNodesChanged_ReturnsFalseWhenNodeShutsDownAndWasRoutedTo_ButRouteIsStopping() { + String model1 = "model-1"; + String mlNode1 = "ml-node-with-room"; + String mlNode2 = "new-ml-node-with-room"; + + ClusterState stateWithOneNode = createClusterState( + List.of(mlNode1), + Metadata.builder() + .putCustom( + TrainedModelAssignmentMetadata.NAME, + TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + model1, + TrainedModelAssignment.Builder.empty(newParams(model1, 100)) + .addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STOPPING, "")) + ) + .build() + ) + .putCustom(NodesShutdownMetadata.TYPE, shutdownMetadata(mlNode1)) + .build() + ); + + ClusterState stateWithTwoNodes = createClusterState( + List.of(mlNode1, mlNode2), + Metadata.builder() + .putCustom( + TrainedModelAssignmentMetadata.NAME, + TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + model1, + TrainedModelAssignment.Builder.empty(newParams(model1, 100)) + .addRoutingEntry(mlNode1, new RoutingInfo(1, 1, RoutingState.STARTING, "")) + ) + .build() + ) + .build() + ); + + var shutdownEvent = new ClusterChangedEvent("test", stateWithOneNode, stateWithTwoNodes); + var metadata = TrainedModelAssignmentMetadata.fromState(shutdownEvent.state()); + + assertThat(TrainedModelAssignmentClusterService.haveMlNodesChanged(shutdownEvent, metadata), is(false)); + } + public void testDetectReasonToRebalanceModels() { String model1 = "model-1"; String model2 = "model-2"; @@ -1321,7 +1409,64 @@ public void testAreAssignedNodesRemoved_GivenShuttingDownNodeThatIsNotRouted() { assertThat(TrainedModelAssignmentClusterService.areAssignedNodesRemoved(event), is(false)); } - public void testRemoveRoutingToUnassignableNodes() { + public void testRemoveRoutingToUnassignableNodes_RemovesRouteForRemovedNodes() { + String modelId1 = "model-1"; + String modelId2 = "model-2"; + String nodeId1 = "node-1"; + String nodeId2 = "node-2"; + String nodeId3 = "node-3"; + Metadata metadata = Metadata.builder() + .putCustom( + TrainedModelAssignmentMetadata.NAME, + TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + modelId1, + TrainedModelAssignment.Builder.empty(newParams(modelId1, 10_000L)) + .addRoutingEntry(nodeId1, new RoutingInfo(1, 1, RoutingState.STARTED, "")) + .addRoutingEntry(nodeId2, new RoutingInfo(1, 1, RoutingState.STARTED, "")) + ) + .addNewAssignment( + modelId2, + TrainedModelAssignment.Builder.empty(newParams(modelId2, 
10_000L)) + .addRoutingEntry(nodeId1, new RoutingInfo(1, 1, RoutingState.STARTED, "")) + .addRoutingEntry(nodeId2, new RoutingInfo(1, 1, RoutingState.STARTED, "")) + ) + .build() + ) + // This node should not affect the assignments because it is not routed to + .putCustom( + NodesShutdownMetadata.TYPE, + new NodesShutdownMetadata( + Map.of( + nodeId3, + SingleNodeShutdownMetadata.builder() + .setNodeId(nodeId3) + .setType(SingleNodeShutdownMetadata.Type.REMOVE) + .setStartedAtMillis(System.currentTimeMillis()) + .setReason("test") + .build() + ) + ) + ) + .build(); + // This simulates node2 being non-existent but not shutting down + ClusterState currentState = createClusterState(List.of(nodeId1, nodeId3), metadata); + + ClusterState resultState = TrainedModelAssignmentClusterService.removeRoutingToUnassignableNodes(currentState); + + TrainedModelAssignmentMetadata trainedModelAssignmentMetadata = TrainedModelAssignmentMetadata.fromState(resultState); + assertThat(trainedModelAssignmentMetadata.allAssignments(), is(aMapWithSize(2))); + for (String modelId : List.of(modelId1, modelId2)) { + TrainedModelAssignment assignment = trainedModelAssignmentMetadata.getDeploymentAssignment(modelId); + assertThat(assignment, is(notNullValue())); + assertThat(assignment.getNodeRoutingTable(), is(aMapWithSize(1))); + assertThat(assignment.getNodeRoutingTable(), hasKey(nodeId1)); + assertThat(assignment.getNodeRoutingTable(), not(hasKey(nodeId3))); + + } + } + + public void testRemoveRoutingToUnassignableNodes_AddsAStoppingRouteForShuttingDownNodes() { String modelId1 = "model-1"; String modelId2 = "model-2"; String nodeId1 = "node-1"; @@ -1362,28 +1507,273 @@ public void testRemoveRoutingToUnassignableNodes() { ) ) .build(); - DiscoveryNode node1 = buildNode(nodeId1, true, ByteSizeValue.ofGb(4).getBytes(), 8); - DiscoveryNode node3 = buildNode(nodeId3, true, ByteSizeValue.ofGb(4).getBytes(), 8); - ClusterState currentState = ClusterState.builder(new ClusterName("testAreAssignedNodesRemoved")) - .nodes(DiscoveryNodes.builder().add(node1).add(node3).build()) - .putTransportVersion(nodeId1, TransportVersion.current()) - .putTransportVersion(nodeId3, TransportVersion.current()) - .metadata(metadata) + // This simulates node2 being non-existent but not shutting down + ClusterState currentState = createClusterState(List.of(nodeId1, nodeId3), metadata); + ClusterState resultState = TrainedModelAssignmentClusterService.removeRoutingToUnassignableNodes(currentState); + + TrainedModelAssignmentMetadata trainedModelAssignmentMetadata = TrainedModelAssignmentMetadata.fromState(resultState); + assertThat(trainedModelAssignmentMetadata.allAssignments(), is(aMapWithSize(2))); + + for (String modelId : List.of(modelId1, modelId2)) { + TrainedModelAssignment assignment = trainedModelAssignmentMetadata.getDeploymentAssignment(modelId); + assertThat(assignment, is(notNullValue())); + assertThat(assignment.getNodeRoutingTable(), is(aMapWithSize(2))); + assertThat(assignment.getNodeRoutingTable(), hasKey(nodeId1)); + assertThat(assignment.getNodeRoutingTable().get(nodeId1).getState(), is(RoutingState.STARTED)); + assertThat(assignment.getNodeRoutingTable(), hasKey(nodeId3)); + assertThat(assignment.getNodeRoutingTable().get(nodeId3).getState(), is(RoutingState.STOPPING)); + assertThat(assignment.getNodeRoutingTable(), not(hasKey(nodeId2))); + } + } + + public void testRemoveRoutingToUnassignableNodes_IgnoresARouteThatIsStoppedForShuttingDownNode() { + String modelId1 = "model-1"; + String modelId2 = "model-2"; + String 
nodeId1 = "node-1"; + String nodeId2 = "node-2"; + String nodeId3 = "node-3"; + Metadata metadata = Metadata.builder() + .putCustom( + TrainedModelAssignmentMetadata.NAME, + TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + modelId1, + TrainedModelAssignment.Builder.empty(newParams(modelId1, 10_000L)) + .addRoutingEntry(nodeId1, new RoutingInfo(1, 1, RoutingState.STARTED, "")) + .addRoutingEntry(nodeId2, new RoutingInfo(1, 1, RoutingState.STARTED, "")) + .addRoutingEntry(nodeId3, new RoutingInfo(1, 1, RoutingState.STOPPED, "")) + ) + .addNewAssignment( + modelId2, + TrainedModelAssignment.Builder.empty(newParams(modelId2, 10_000L)) + .addRoutingEntry(nodeId1, new RoutingInfo(1, 1, RoutingState.STARTED, "")) + .addRoutingEntry(nodeId2, new RoutingInfo(1, 1, RoutingState.STARTED, "")) + .addRoutingEntry(nodeId3, new RoutingInfo(1, 1, RoutingState.STOPPED, "")) + ) + .build() + ) + .putCustom( + NodesShutdownMetadata.TYPE, + new NodesShutdownMetadata( + Map.of( + nodeId3, + SingleNodeShutdownMetadata.builder() + .setNodeId(nodeId3) + .setType(SingleNodeShutdownMetadata.Type.REMOVE) + .setStartedAtMillis(System.currentTimeMillis()) + .setReason("test") + .build() + ) + ) + ) .build(); + // This simulates node2 being non-existent but not shutting down + ClusterState currentState = createClusterState(List.of(nodeId1, nodeId3), metadata); ClusterState resultState = TrainedModelAssignmentClusterService.removeRoutingToUnassignableNodes(currentState); TrainedModelAssignmentMetadata trainedModelAssignmentMetadata = TrainedModelAssignmentMetadata.fromState(resultState); assertThat(trainedModelAssignmentMetadata.allAssignments(), is(aMapWithSize(2))); + for (String modelId : List.of(modelId1, modelId2)) { TrainedModelAssignment assignment = trainedModelAssignmentMetadata.getDeploymentAssignment(modelId); assertThat(assignment, is(notNullValue())); - assertThat(assignment.getNodeRoutingTable(), is(aMapWithSize(1))); + assertThat(assignment.getNodeRoutingTable(), is(aMapWithSize(2))); assertThat(assignment.getNodeRoutingTable(), hasKey(nodeId1)); + assertThat(assignment.getNodeRoutingTable().get(nodeId1).getState(), is(RoutingState.STARTED)); + assertThat(assignment.getNodeRoutingTable(), hasKey(nodeId3)); + assertThat(assignment.getNodeRoutingTable().get(nodeId3).getState(), is(RoutingState.STOPPED)); + assertThat(assignment.getNodeRoutingTable(), not(hasKey(nodeId2))); } } - private ClusterState.Builder csBuilderWithNodes(String name, DiscoveryNode... 
nodes) { + public void testSetShuttingDownNodeRoutesToStopping_GivenAnAssignmentRoutedToShuttingDownNode_ItSetsShuttingDownNodeRouteToStopping() { + var availableNode = "node-1"; + var availableNodeModelId = "available-model-id"; + StartTrainedModelDeploymentAction.TaskParams taskParamsRunning = newParams(availableNodeModelId, 100); + + var shuttingDownNodeId = "shutting-down-1"; + var shuttingDownModelId = "id1"; + StartTrainedModelDeploymentAction.TaskParams taskParamsShuttingDown = newParams(shuttingDownModelId, 100); + + TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + availableNodeModelId, + TrainedModelAssignment.Builder.empty(taskParamsRunning) + .addRoutingEntry(availableNode, new RoutingInfo(1, 1, RoutingState.STARTED, "")) + ) + .addNewAssignment( + shuttingDownModelId, + TrainedModelAssignment.Builder.empty(taskParamsShuttingDown) + .addRoutingEntry(shuttingDownNodeId, new RoutingInfo(1, 1, RoutingState.STARTED, "")) + ) + .build(); + + TrainedModelAssignmentMetadata.Builder rebalanced = TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + availableNodeModelId, + TrainedModelAssignment.Builder.empty(taskParamsRunning) + .addRoutingEntry(availableNode, new RoutingInfo(1, 1, RoutingState.STARTED, "")) + ) + .addNewAssignment( + shuttingDownModelId, + TrainedModelAssignment.Builder.empty(taskParamsRunning) + .addRoutingEntry(availableNode, new RoutingInfo(1, 1, RoutingState.STARTING, "")) + ); + + TrainedModelAssignmentMetadata result = TrainedModelAssignmentClusterService.setShuttingDownNodeRoutesToStopping( + currentMetadata, + Set.of(shuttingDownNodeId), + rebalanced + ).build(); + + TrainedModelAssignment assignment = result.getDeploymentAssignment(shuttingDownModelId); + assertThat(assignment, is(notNullValue())); + assertThat(assignment.getAssignmentState(), equalTo(AssignmentState.STARTING)); + assertThat(assignment.getNodeRoutingTable().get(availableNode).getState(), is(RoutingState.STARTING)); + assertThat(assignment.getNodeRoutingTable().get(shuttingDownNodeId).getState(), is(RoutingState.STOPPING)); + assertThat(assignment.getReason().isPresent(), is(false)); + } + + public + void + testSetShuttingDownNodeRoutesToStopping_GivenTwoAssignmentsWithOneOnAShuttingDownNode_ItSetsShuttingDownNodeRouteToStopping() { + var availableNode = "node-1"; + + var shuttingDownModelId = "id1"; + StartTrainedModelDeploymentAction.TaskParams taskParamsShuttingDown = newParams(shuttingDownModelId, 300); + + var notShuttingDownModelId = "id2"; + StartTrainedModelDeploymentAction.TaskParams taskParamsNotShuttingDown = newParams(notShuttingDownModelId, 300); + + var shuttingDownNodeId = "shutting-down-1"; + TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + shuttingDownModelId, + TrainedModelAssignment.Builder.empty(taskParamsShuttingDown) + .addRoutingEntry(shuttingDownNodeId, new RoutingInfo(1, 1, RoutingState.STARTED, "")) + ) + .addNewAssignment( + notShuttingDownModelId, + TrainedModelAssignment.Builder.empty(taskParamsNotShuttingDown) + .addRoutingEntry(availableNode, new RoutingInfo(1, 1, RoutingState.STARTED, "")) + ) + .build(); + + TrainedModelAssignmentMetadata.Builder rebalanced = TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + shuttingDownModelId, + TrainedModelAssignment.Builder.empty(taskParamsShuttingDown) + .addRoutingEntry(availableNode, new RoutingInfo(1, 1, RoutingState.STARTING, "")) + ) + 
.addNewAssignment( + notShuttingDownModelId, + TrainedModelAssignment.Builder.empty(taskParamsNotShuttingDown) + .addRoutingEntry(availableNode, new RoutingInfo(1, 1, RoutingState.STARTED, "")) + ); + + TrainedModelAssignmentMetadata result = TrainedModelAssignmentClusterService.setShuttingDownNodeRoutesToStopping( + currentMetadata, + Set.of(shuttingDownNodeId), + rebalanced + ).build(); + + TrainedModelAssignment shuttingDownAssignment = result.getDeploymentAssignment(shuttingDownModelId); + assertThat(shuttingDownAssignment, is(notNullValue())); + assertThat(shuttingDownAssignment.getAssignmentState(), equalTo(AssignmentState.STARTING)); + assertThat(shuttingDownAssignment.getNodeRoutingTable().get(availableNode).getState(), is(RoutingState.STARTING)); + assertThat(shuttingDownAssignment.getNodeRoutingTable().get(shuttingDownNodeId).getState(), is(RoutingState.STOPPING)); + assertThat(shuttingDownAssignment.getReason().isPresent(), is(false)); + + TrainedModelAssignment assignment = result.getDeploymentAssignment(notShuttingDownModelId); + assertThat(assignment, is(notNullValue())); + // assignment state is set to starting by default + assertThat(assignment.getAssignmentState(), equalTo(AssignmentState.STARTING)); + assertThat(assignment.getNodeRoutingTable().get(availableNode).getState(), is(RoutingState.STARTED)); + assertThat(assignment.getNodeRoutingTable().get(shuttingDownNodeId), is(nullValue())); + assertThat(assignment.getReason().isPresent(), is(false)); + } + + public + void + testSetShuttingDownNodeRoutesToStopping_GivenShuttingDownNodeWithNoAssociatedAssignments_ItDoesNotMarkAnyAssignmentsAsStopping() { + var availableNode = "node-1"; + + var shuttingDownNodeId = "shutting-down-1"; + var modelId = "id1"; + StartTrainedModelDeploymentAction.TaskParams taskParamsShuttingDown = newParams(modelId, 300); + + var disappearingNodeId = "disappearingNode"; + TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + modelId, + TrainedModelAssignment.Builder.empty(taskParamsShuttingDown) + .addRoutingEntry(disappearingNodeId, new RoutingInfo(1, 1, RoutingState.STARTED, "")) + ) + .build(); + + TrainedModelAssignmentMetadata.Builder rebalanced = TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + modelId, + TrainedModelAssignment.Builder.empty(taskParamsShuttingDown) + .addRoutingEntry(availableNode, new RoutingInfo(1, 1, RoutingState.STARTED, "")) + ); + + TrainedModelAssignmentMetadata result = TrainedModelAssignmentClusterService.setShuttingDownNodeRoutesToStopping( + currentMetadata, + Set.of(shuttingDownNodeId), + rebalanced + ).build(); + + TrainedModelAssignment assignment = result.getDeploymentAssignment(modelId); + assertThat(assignment, is(notNullValue())); + // assignment state is set to starting by default + assertThat(assignment.getAssignmentState(), equalTo(AssignmentState.STARTING)); + assertThat(assignment.getNodeRoutingTable().get(availableNode).getState(), is(RoutingState.STARTED)); + assertThat(assignment.getNodeRoutingTable().get(shuttingDownNodeId), is(nullValue())); + assertThat(assignment.getNodeRoutingTable().get(disappearingNodeId), is(nullValue())); + assertThat(assignment.getReason().isPresent(), is(false)); + } + + public void testSetShuttingDownNodeRoutesToStopping_GivenAssignmentDoesNotExist_ItSetsAssignmentStateToStoppingAndRouteToStopping() { + var shuttingDownNodeId = "shutting-down-1"; + var modelId = "id1"; + StartTrainedModelDeploymentAction.TaskParams 
taskParamsShuttingDown = newParams(modelId, 300); + + TrainedModelAssignmentMetadata currentMetadata = TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + modelId, + TrainedModelAssignment.Builder.empty(taskParamsShuttingDown) + .addRoutingEntry(shuttingDownNodeId, new RoutingInfo(1, 1, RoutingState.STARTED, "")) + ) + .build(); + + TrainedModelAssignmentMetadata result = TrainedModelAssignmentClusterService.setShuttingDownNodeRoutesToStopping( + currentMetadata, + Set.of(shuttingDownNodeId), + TrainedModelAssignmentMetadata.Builder.empty() + ).build(); + + TrainedModelAssignment assignment = result.getDeploymentAssignment(modelId); + assertThat(assignment, is(notNullValue())); + assertThat(assignment.getAssignmentState(), equalTo(AssignmentState.STOPPING)); + assertThat(assignment.getNodeRoutingTable().get(shuttingDownNodeId).getState(), is(RoutingState.STOPPING)); + assertThat(assignment.getReason().isPresent(), is(true)); + assertThat(assignment.getReason().get(), is("nodes changed")); + } + + private static ClusterState createClusterState(List<String> nodeIds, Metadata metadata) { + DiscoveryNode[] nodes = nodeIds.stream() + .map(id -> buildNode(id, true, ByteSizeValue.ofGb(4).getBytes(), 8)) + .toArray(DiscoveryNode[]::new); + + ClusterState.Builder csBuilder = csBuilderWithNodes("test", nodes); + nodeIds.forEach(id -> csBuilder.putTransportVersion(id, TransportVersion.current())); + + return csBuilder.metadata(metadata).build(); + } + + private static ClusterState.Builder csBuilderWithNodes(String name, DiscoveryNode... nodes) { var csBuilder = ClusterState.builder(new ClusterName(name)); var nodeBuilder = DiscoveryNodes.builder(); for (var node : nodes) { @@ -1443,6 +1833,20 @@ public void testSetAllocationToStopping() { ); } + static NodesShutdownMetadata shutdownMetadata(String nodeId) { + return new NodesShutdownMetadata( + Collections.singletonMap( + nodeId, + SingleNodeShutdownMetadata.builder() + .setType(SingleNodeShutdownMetadata.Type.REMOVE) + .setStartedAtMillis(randomNonNegativeLong()) + .setReason("tests") + .setNodeId(nodeId) + .build() + ) + ); + } + private void assertThatStoppingAssignmentPreventsMutation( Function<ClusterState, ClusterState> mutationFunction, ClusterState original @@ -1544,17 +1948,4 @@ private static StartTrainedModelDeploymentAction.TaskParams newParams( ); } - private static NodesShutdownMetadata shutdownMetadata(String nodeId) { - return new NodesShutdownMetadata( - Collections.singletonMap( - nodeId, - SingleNodeShutdownMetadata.builder() - .setType(SingleNodeShutdownMetadata.Type.REMOVE) - .setStartedAtMillis(randomNonNegativeLong()) - .setReason("tests") - .setNodeId(nodeId) - .build() - ) - ); - } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java index 06f486d5ab259..b0903cf47dc88 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import
org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; @@ -49,6 +50,7 @@ import java.util.concurrent.TimeUnit; import static org.elasticsearch.xpack.ml.MachineLearning.UTILITY_THREAD_POOL_NAME; +import static org.elasticsearch.xpack.ml.inference.assignment.TrainedModelAssignmentClusterServiceTests.shutdownMetadata; import static org.hamcrest.Matchers.equalTo; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; @@ -92,6 +94,13 @@ public void setupObjects() { listener.onResponse(invocationOnMock.getArguments()[0]); return null; }).when(deploymentManager).startDeployment(any(), any()); + + doAnswer(invocationOnMock -> { + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + listener.onResponse(null); + return null; + }).when(deploymentManager).stopAfterCompletingPendingWork(any()); + doAnswer(invocationOnMock -> { ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; listener.onResponse(AcknowledgedResponse.TRUE); @@ -220,7 +229,7 @@ public void testLoadQueuedModelsWhenTaskIsStopped() throws Exception { // Only one model should be loaded, the other should be stopped trainedModelAssignmentNodeService.prepareModelToLoad(newParams(loadingDeploymentId, modelToLoad)); trainedModelAssignmentNodeService.prepareModelToLoad(newParams(stoppedLoadingDeploymentId, stoppedModelToLoad)); - trainedModelAssignmentNodeService.getTask(stoppedLoadingDeploymentId).stop("testing", ActionListener.noop()); + trainedModelAssignmentNodeService.getTask(stoppedLoadingDeploymentId).stop("testing", false, ActionListener.noop()); trainedModelAssignmentNodeService.loadQueuedModels(); assertBusy(() -> { @@ -356,6 +365,190 @@ public void testClusterChangedWithResetMode() { verifyNoMoreInteractions(deploymentManager, trainedModelAssignmentService); } + public void testClusterChanged_WhenAssignmentIsRoutedToShuttingDownNode_CallsStopAfterCompletingPendingWork() + throws InterruptedException { + final TrainedModelAssignmentNodeService trainedModelAssignmentNodeService = createService(); + final DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId(NODE_ID).add(DiscoveryNodeUtils.create(NODE_ID, NODE_ID)).build(); + String modelOne = "model-1"; + String deploymentOne = "deployment-1"; + + ArgumentCaptor<TrainedModelDeploymentTask> stopParamsCapture = ArgumentCaptor.forClass(TrainedModelDeploymentTask.class); + + CountDownLatch stopProcessCompletedLatch = new CountDownLatch(1); + doAnswer(invocationOnMock -> { + @SuppressWarnings({ "unchecked", "rawtypes" }) + ActionListener listener = (ActionListener) invocationOnMock.getArguments()[1]; + stopProcessCompletedLatch.countDown(); + listener.onResponse(AcknowledgedResponse.TRUE); + return null; + }).when(trainedModelAssignmentService).updateModelAssignmentState(any(), any()); + + var taskParams = newParams(deploymentOne, modelOne); + + ClusterChangedEvent event = new ClusterChangedEvent( + "testClusterChanged", + ClusterState.builder(new ClusterName("testClusterChanged")) + .nodes(nodes) + .metadata( + Metadata.builder() + .putCustom( + TrainedModelAssignmentMetadata.NAME, + TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + deploymentOne, + TrainedModelAssignment.Builder.empty(taskParams) + .addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STOPPING, "")) + ) + .build() + ) + .putCustom(NodesShutdownMetadata.TYPE, shutdownMetadata(NODE_ID)) + .build() + ) + .build(), + ClusterState.EMPTY_STATE + ); + 
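+ // Register the deployment locally, then deliver the shutdown-marked cluster state; the STOPPING route should drive the finish-pending-work stop path.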
trainedModelAssignmentNodeService.prepareModelToLoad(taskParams); + trainedModelAssignmentNodeService.clusterChanged(event); + + if (stopProcessCompletedLatch.await(1, TimeUnit.MINUTES) == false) { + fail("Failed waiting for the stop process call to complete"); + } + + verify(deploymentManager, times(1)).stopAfterCompletingPendingWork(stopParamsCapture.capture()); + assertThat(stopParamsCapture.getValue().getModelId(), equalTo(modelOne)); + assertThat(stopParamsCapture.getValue().getDeploymentId(), equalTo(deploymentOne)); + verify(trainedModelAssignmentService, times(1)).updateModelAssignmentState( + any(UpdateTrainedModelAssignmentRoutingInfoAction.Request.class), + any() + ); + verifyNoMoreInteractions(deploymentManager, trainedModelAssignmentService); + } + + public void testClusterChanged_WhenAssignmentIsRoutedToShuttingDownNodeButAlreadyRemoved_DoesNotCallStop() { + final TrainedModelAssignmentNodeService trainedModelAssignmentNodeService = createService(); + final DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId(NODE_ID).add(DiscoveryNodeUtils.create(NODE_ID, NODE_ID)).build(); + String modelOne = "model-1"; + String deploymentOne = "deployment-1"; + + ArgumentCaptor<TrainedModelDeploymentTask> stopParamsCapture = ArgumentCaptor.forClass(TrainedModelDeploymentTask.class); + var taskParams = newParams(deploymentOne, modelOne); + + ClusterChangedEvent event = new ClusterChangedEvent( + "testClusterChanged", + ClusterState.builder(new ClusterName("testClusterChanged")) + .nodes(nodes) + .metadata( + Metadata.builder() + .putCustom( + TrainedModelAssignmentMetadata.NAME, + TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + deploymentOne, + TrainedModelAssignment.Builder.empty(taskParams) + .addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STOPPING, "")) + ) + .build() + ) + .putCustom(NodesShutdownMetadata.TYPE, shutdownMetadata(NODE_ID)) + .build() + ) + .build(), + ClusterState.EMPTY_STATE + ); + + trainedModelAssignmentNodeService.clusterChanged(event); + + verify(deploymentManager, never()).stopAfterCompletingPendingWork(any()); + verify(trainedModelAssignmentService, never()).updateModelAssignmentState( + any(UpdateTrainedModelAssignmentRoutingInfoAction.Request.class), + any() + ); + verifyNoMoreInteractions(deploymentManager, trainedModelAssignmentService); + } + + public void testClusterChanged_WhenAssignmentIsRoutedToShuttingDownNodeWithStartingState_DoesNotStopTheDeployment() { + final TrainedModelAssignmentNodeService trainedModelAssignmentNodeService = createService(); + final DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId(NODE_ID).add(DiscoveryNodeUtils.create(NODE_ID, NODE_ID)).build(); + String modelOne = "model-1"; + String deploymentOne = "deployment-1"; + + var taskParams = newParams(deploymentOne, modelOne); + + ClusterChangedEvent event = new ClusterChangedEvent( + "testClusterChanged", + ClusterState.builder(new ClusterName("testClusterChanged")) + .nodes(nodes) + .metadata( + Metadata.builder() + .putCustom( + TrainedModelAssignmentMetadata.NAME, + TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + deploymentOne, + TrainedModelAssignment.Builder.empty(taskParams) + .addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STARTING, "")) + ) + .build() + ) + .putCustom(NodesShutdownMetadata.TYPE, shutdownMetadata(NODE_ID)) + .build() + ) + .build(), + ClusterState.EMPTY_STATE + ); + + trainedModelAssignmentNodeService.prepareModelToLoad(taskParams); + trainedModelAssignmentNodeService.clusterChanged(event); + 
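+ // The route for this node is still STARTING, so the shutdown must not trigger the finish-pending-work stop for this deployment.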
verify(deploymentManager, never()).stopAfterCompletingPendingWork(any()); + verify(trainedModelAssignmentService, never()).updateModelAssignmentState( + any(UpdateTrainedModelAssignmentRoutingInfoAction.Request.class), + any() + ); + verifyNoMoreInteractions(deploymentManager, trainedModelAssignmentService); + } + + public void testClusterChanged_WhenAssignmentIsStopping_DoesNotAddModelToBeLoaded() { + final TrainedModelAssignmentNodeService trainedModelAssignmentNodeService = createService(); + final DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId(NODE_ID).add(DiscoveryNodeUtils.create(NODE_ID, NODE_ID)).build(); + String modelOne = "model-1"; + String deploymentOne = "deployment-1"; + + var taskParams = newParams(deploymentOne, modelOne); + + ClusterChangedEvent event = new ClusterChangedEvent( + "testClusterChanged", + ClusterState.builder(new ClusterName("testClusterChanged")) + .nodes(nodes) + .metadata( + Metadata.builder() + .putCustom( + TrainedModelAssignmentMetadata.NAME, + TrainedModelAssignmentMetadata.Builder.empty() + .addNewAssignment( + deploymentOne, + TrainedModelAssignment.Builder.empty(taskParams) + .addRoutingEntry(NODE_ID, new RoutingInfo(1, 1, RoutingState.STARTING, "")) + .stopAssignment("stopping") + ) + .build() + ) + .putCustom(MlMetadata.TYPE, new MlMetadata.Builder().isResetMode(false).build()) + .build() + ) + .build(), + ClusterState.EMPTY_STATE + ); + + // trainedModelAssignmentNodeService.prepareModelToLoad(taskParams); + trainedModelAssignmentNodeService.clusterChanged(event); + trainedModelAssignmentNodeService.loadQueuedModels(); + + verify(deploymentManager, never()).startDeployment(any(), any()); + verifyNoMoreInteractions(deploymentManager, trainedModelAssignmentService); + } + public void testClusterChanged() throws Exception { final TrainedModelAssignmentNodeService trainedModelAssignmentNodeService = createService(); final DiscoveryNodes nodes = DiscoveryNodes.builder().localNodeId(NODE_ID).add(DiscoveryNodeUtils.create(NODE_ID, NODE_ID)).build(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/ThreadSettingsControlMessagePytorchActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/ThreadSettingsControlMessagePytorchActionTests.java index b992d4fdc2ec3..996f80ce47e2e 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/ThreadSettingsControlMessagePytorchActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/ThreadSettingsControlMessagePytorchActionTests.java @@ -10,6 +10,7 @@ import org.apache.lucene.util.SetOnce; import org.elasticsearch.action.ActionListener; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.util.concurrent.DeterministicTaskQueue; import org.elasticsearch.core.TimeValue; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.Scheduler; @@ -20,6 +21,7 @@ import org.mockito.ArgumentCaptor; import java.io.IOException; +import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicInteger; import static org.mockito.ArgumentMatchers.any; @@ -58,9 +60,8 @@ public void testRunNotCalledAfterNotified() { AtomicInteger timeoutCount = new AtomicInteger(); when(processContext.getTimeoutCount()).thenReturn(timeoutCount); - Scheduler.ScheduledCancellable cancellable = mock(Scheduler.ScheduledCancellable.class); - ThreadPool tp = mock(ThreadPool.class); - when(tp.schedule(any(), any(),
any())).thenReturn(cancellable); + final var deterministicTaskQueue = new DeterministicTaskQueue(); + ThreadPool tp = deterministicTaskQueue.getThreadPool(); { ActionListener listener = mock(ActionListener.class); @@ -116,7 +117,7 @@ public void testDoRun() throws IOException { Scheduler.ScheduledCancellable cancellable = mock(Scheduler.ScheduledCancellable.class); ThreadPool tp = mock(ThreadPool.class); - when(tp.schedule(any(), any(), any())).thenReturn(cancellable); + when(tp.schedule(any(), any(), any(Executor.class))).thenReturn(cancellable); ActionListener listener = mock(ActionListener.class); ArgumentCaptor<BytesReference> messageCapture = ArgumentCaptor.forClass(BytesReference.class); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTaskTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTaskTests.java index 44dc44971bf38..93d5f309f899d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTaskTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/deployment/TrainedModelDeploymentTaskTests.java @@ -39,11 +39,18 @@ void assertTrackingComplete(Consumer method, String TrainedModelAssignmentNodeService nodeService = mock(TrainedModelAssignmentNodeService.class); ArgumentCaptor<TrainedModelDeploymentTask> taskCaptor = ArgumentCaptor.forClass(TrainedModelDeploymentTask.class); - ArgumentCaptor<String> reasonCaptur = ArgumentCaptor.forClass(String.class); + ArgumentCaptor<String> reasonCaptor = ArgumentCaptor.forClass(String.class); doAnswer(invocation -> { - taskCaptor.getValue().markAsStopped(reasonCaptur.getValue()); + taskCaptor.getValue().markAsStopped(reasonCaptor.getValue()); return null; - }).when(nodeService).stopDeploymentAndNotify(taskCaptor.capture(), reasonCaptur.capture(), any()); + }).when(nodeService).stopDeploymentAndNotify(taskCaptor.capture(), reasonCaptor.capture(), any()); + + ArgumentCaptor<TrainedModelDeploymentTask> taskCaptorGraceful = ArgumentCaptor.forClass(TrainedModelDeploymentTask.class); + ArgumentCaptor<String> reasonCaptorGraceful = ArgumentCaptor.forClass(String.class); + doAnswer(invocation -> { + taskCaptorGraceful.getValue().markAsStopped(reasonCaptorGraceful.getValue()); + return null; + }).when(nodeService).gracefullyStopDeploymentAndNotify(taskCaptorGraceful.capture(), reasonCaptorGraceful.capture(), any()); TrainedModelDeploymentTask task = new TrainedModelDeploymentTask( 0, @@ -79,7 +86,11 @@ public void testMarkAsStopped() { } public void testOnStop() { - assertTrackingComplete(t -> t.stop("foo", ActionListener.noop()), randomAlphaOfLength(10), randomAlphaOfLength(10)); + assertTrackingComplete(t -> t.stop("foo", false, ActionListener.noop()), randomAlphaOfLength(10), randomAlphaOfLength(10)); + } + + public void testOnStopGracefully() { + assertTrackingComplete(t -> t.stop("foo", true, ActionListener.noop()), randomAlphaOfLength(10), randomAlphaOfLength(10)); } public void testCancelled() { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/PriorityProcessWorkerExecutorServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/PriorityProcessWorkerExecutorServiceTests.java index a96d5e9852229..d1923ca999063 100644 +++
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/PriorityProcessWorkerExecutorServiceTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.ml.inference.pytorch; +import org.elasticsearch.common.Strings; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -15,6 +16,8 @@ import java.util.ArrayList; import java.util.List; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.xpack.ml.inference.pytorch.PriorityProcessWorkerExecutorService.RequestPriority; @@ -97,6 +100,51 @@ public void testOrderedRunnables_NormalPriority() { assertTrue(r3.hasBeenRun); } + public void testExecutorShutsDownAfterCompletingWork() { + var executor = createProcessWorkerExecutorService(100); + + var counter = new AtomicInteger(); + + var r1 = new RunOrderValidator(1, counter); + executor.executeWithPriority(r1, RequestPriority.NORMAL, 100L); + var r2 = new RunOrderValidator(2, counter); + executor.executeWithPriority(r2, RequestPriority.NORMAL, 101L); + var r3 = new RunOrderValidator(3, counter); + executor.executeWithPriority(r3, RequestPriority.NORMAL, 102L); + + runExecutorAndAssertTermination(executor); + + assertTrue(r1.initialized); + assertTrue(r2.initialized); + assertTrue(r3.initialized); + + assertTrue(r1.hasBeenRun); + assertTrue(r2.hasBeenRun); + assertTrue(r3.hasBeenRun); + } + + private void runExecutorAndAssertTermination(PriorityProcessWorkerExecutorService executor) { + Future<?> executorTermination = threadPool.generic().submit(() -> { + try { + executor.shutdown(); + executor.awaitTermination(1, TimeUnit.MINUTES); + } catch (Exception e) { + fail(Strings.format("Failed to gracefully shutdown executor: %s", e.getMessage())); + } + }); + + executor.start(); + + try { + executorTermination.get(1, TimeUnit.SECONDS); + } catch (Exception e) { + fail("Executor did not terminate after being signalled to shut down gracefully"); + } + + assertTrue(executor.isShutdown()); + assertTrue(executor.isTerminated()); + } + public void testOrderedRunnables_MixedPriorities() { var executor = createProcessWorkerExecutorService(100); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessorTests.java index 55925ccf5a5bb..e172f4ffb528c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/pytorch/process/PyTorchResultProcessorTests.java @@ -19,6 +19,8 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Consumer; import java.util.function.LongSupplier; @@ -81,6 +83,24 @@ public void testResultsProcessing() { assertTrue(errorListener.hasResponse); } + public void testAwaitCompletion() { + var inferenceResult = new PyTorchInferenceResult(null); + var inferenceListener = new AssertingResultListener(r -> assertEquals(inferenceResult, r.inferenceResult())); + + var processor = new PyTorchResultProcessor("foo", s -> {}); + processor.registerRequest("a", inferenceListener); + 
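+ // Push a single result through a mock native process; once its iterator is exhausted the processor finishes and awaitCompletion should return before the timeout.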
processor.process(mockNativeProcess(List.of(new PyTorchResult("a", true, 1000L, inferenceResult, null, null, null)).iterator())); + + try { + processor.awaitCompletion(5, TimeUnit.SECONDS); + } catch (TimeoutException e) { + fail("Timed out waiting for the processor to complete"); + } + + assertTrue(inferenceListener.hasResponse); + } + public void testPendingRequest() { var processor = new PyTorchResultProcessor("foo", s -> {}); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java index 5b95b659a9fe4..2dc8b77fc5834 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/persistence/JobResultsPersisterTests.java @@ -57,6 +57,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import static org.hamcrest.Matchers.equalTo; @@ -435,7 +436,7 @@ private ResultsPersisterService buildResultsPersisterService(OriginSettingClient doAnswer(invocationOnMock -> { ((Runnable) invocationOnMock.getArguments()[0]).run(); return null; - }).when(tp).schedule(any(Runnable.class), any(TimeValue.class), any(String.class)); + }).when(tp).schedule(any(Runnable.class), any(TimeValue.class), any(Executor.class)); return new ResultsPersisterService(tp, client, clusterService, Settings.EMPTY); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/ProcessWorkerExecutorServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/ProcessWorkerExecutorServiceTests.java index 6e7f6b5832a8b..096d0b7105ce5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/ProcessWorkerExecutorServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/ProcessWorkerExecutorServiceTests.java @@ -40,7 +40,7 @@ public void testAutodetectWorkerExecutorService_SubmitAfterShutdown() { ProcessWorkerExecutorService executor = createExecutorService(); threadPool.generic().execute(executor::start); - executor.shutdown(); + executor.shutdownNow(); AtomicBoolean rejected = new AtomicBoolean(false); AtomicBoolean initialized = new AtomicBoolean(false); executor.execute(new AbstractInitializableRunnable() { @@ -106,9 +106,9 @@ protected void doRun() { boolean shutdownWithError = randomBoolean(); // now shutdown if (shutdownWithError) { - executor.shutdownWithError(new ElasticsearchException("stopping the executor because an error occurred")); + executor.shutdownNowWithError(new ElasticsearchException("stopping the executor because an error occurred")); } else { - executor.shutdown(); + executor.shutdownNow(); } latch.countDown(); executorFinished.get(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicatorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicatorTests.java index 7aee18aeb4ca5..3941c10fb8a19 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicatorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/process/autodetect/AutodetectCommunicatorTests.java @@ -206,7 +206,7 @@ public void testKill() throws IOException, 
TimeoutException { communicator.killProcess(awaitCompletion, finish); verify(resultProcessor).setProcessKilled(); verify(process).kill(awaitCompletion); - verify(executorService).shutdown(); + verify(executorService).shutdownNow(); if (awaitCompletion) { verify(resultProcessor).awaitCompletion(); } else { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java index 26ee875049c21..918b660ae7e61 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java @@ -94,6 +94,7 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.ExecutionException; +import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; @@ -292,7 +293,7 @@ protected static ThreadPool mockThreadPool() { doAnswer(invocationOnMock -> { ((Runnable) invocationOnMock.getArguments()[0]).run(); return null; - }).when(tp).schedule(any(Runnable.class), any(TimeValue.class), any(String.class)); + }).when(tp).schedule(any(Runnable.class), any(TimeValue.class), any(Executor.class)); return tp; } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TransportVersionUtilsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TransportVersionUtilsTests.java index 020ee64a0328a..87743158995d4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TransportVersionUtilsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/TransportVersionUtilsTests.java @@ -7,9 +7,11 @@ package org.elasticsearch.xpack.ml.utils; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterName; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.block.ClusterBlocks; +import org.elasticsearch.cluster.version.CompatibilityVersions; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.utils.TransportVersionUtils; @@ -19,15 +21,15 @@ public class TransportVersionUtilsTests extends ESTestCase { - private static final Map transportVersions = Map.of( + private static final Map transportVersions = Map.of( "Alfredo", - TransportVersion.V_7_0_0, + new CompatibilityVersions(TransportVersions.V_7_0_0), "Bertram", - TransportVersion.V_7_0_1, + new CompatibilityVersions(TransportVersions.V_7_0_1), "Charles", - TransportVersion.V_8_500_010, + new CompatibilityVersions(TransportVersions.V_8_500_020), "Dominic", - TransportVersion.V_8_0_0 + new CompatibilityVersions(TransportVersions.V_8_0_0) ); private static final ClusterState state = new ClusterState( @@ -45,13 +47,13 @@ public class TransportVersionUtilsTests extends ESTestCase { ); public void testGetMinTransportVersion() { - assertThat(TransportVersionUtils.getMinTransportVersion(state), equalTo(TransportVersion.V_7_0_0)); + assertThat(TransportVersionUtils.getMinTransportVersion(state), equalTo(TransportVersions.V_7_0_0)); } public void testIsMinTransformVersionSameAsCurrent() { assertThat(TransportVersionUtils.isMinTransportVersionSameAsCurrent(state), equalTo(false)); - Map transportVersions1 = Map.of("Eugene", TransportVersion.current()); + Map transportVersions1 = 
Map.of("Eugene", new CompatibilityVersions(TransportVersion.current())); ClusterState state1 = new ClusterState( new ClusterName("harry"), @@ -71,7 +73,7 @@ public void testIsMinTransformVersionSameAsCurrent() { } public void testIsMinTransportVersionOnOrAfter() { - assertThat(TransportVersionUtils.isMinTransportVersionOnOrAfter(state, TransportVersion.V_7_0_0), equalTo(true)); - assertThat(TransportVersionUtils.isMinTransportVersionOnOrAfter(state, TransportVersion.V_8_500_010), equalTo(false)); + assertThat(TransportVersionUtils.isMinTransportVersionOnOrAfter(state, TransportVersions.V_7_0_0), equalTo(true)); + assertThat(TransportVersionUtils.isMinTransportVersionOnOrAfter(state, TransportVersions.V_8_500_020), equalTo(false)); } } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterServiceTests.java index de7007eda9146..ec07d0424c841 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/utils/persistence/ResultsPersisterServiceTests.java @@ -49,6 +49,7 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; +import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; @@ -414,7 +415,7 @@ public static ResultsPersisterService buildResultsPersisterService(OriginSetting doAnswer(invocationOnMock -> { ((Runnable) invocationOnMock.getArguments()[0]).run(); return null; - }).when(tp).schedule(any(Runnable.class), any(TimeValue.class), any(String.class)); + }).when(tp).schedule(any(Runnable.class), any(TimeValue.class), any(Executor.class)); return new ResultsPersisterService(tp, client, clusterService, Settings.EMPTY); } } diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringService.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringService.java index 325817769745f..3e6ccb425fec5 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringService.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/MonitoringService.java @@ -28,6 +28,7 @@ import java.util.Collection; import java.util.Objects; import java.util.Set; +import java.util.concurrent.Executor; import java.util.concurrent.Semaphore; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -96,6 +97,7 @@ public class MonitoringService extends AbstractLifecycleComponent { private final ClusterService clusterService; private final ThreadPool threadPool; + private final Executor executor; private final Set collectors; private final Exporters exporters; @@ -113,6 +115,7 @@ public class MonitoringService extends AbstractLifecycleComponent { ) { this.clusterService = Objects.requireNonNull(clusterService); this.threadPool = Objects.requireNonNull(threadPool); + this.executor = threadPool.generic(); this.collectors = Objects.requireNonNull(collectors); this.exporters = Objects.requireNonNull(exporters); this.elasticsearchCollectionEnabled = ELASTICSEARCH_COLLECTION_ENABLED.get(settings); @@ -158,10 +161,6 @@ boolean shouldScheduleExecution() { return isElasticsearchCollectionEnabled() && isMonitoringActive(); } - private String 
threadPoolName() { - return ThreadPool.Names.GENERIC; - } - boolean isStarted() { return started.get(); } @@ -217,7 +216,7 @@ void scheduleExecution() { cancelExecution(); } if (shouldScheduleExecution()) { - scheduler = threadPool.scheduleWithFixedDelay(monitor, interval, threadPoolName()); + scheduler = threadPool.scheduleWithFixedDelay(monitor, interval, executor); } } @@ -264,7 +263,7 @@ public void doRun() { return; } - threadPool.executor(threadPoolName()).submit(new AbstractRunnable() { + executor.execute(new AbstractRunnable() { @Override protected void doRun() throws Exception { final long timestamp = System.currentTimeMillis(); diff --git a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerService.java b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerService.java index 157245f583b7d..dad3d820278e7 100644 --- a/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerService.java +++ b/x-pack/plugin/monitoring/src/main/java/org/elasticsearch/xpack/monitoring/cleaner/CleanerService.java @@ -23,6 +23,7 @@ import java.time.ZonedDateTime; import java.util.List; import java.util.concurrent.CopyOnWriteArrayList; +import java.util.concurrent.Executor; /** * {@code CleanerService} takes care of deleting old monitoring indices. @@ -31,6 +32,7 @@ public class CleanerService extends AbstractLifecycleComponent { private static final Logger logger = LogManager.getLogger(CleanerService.class); private final ThreadPool threadPool; + private final Executor genericExecutor; private final ExecutionScheduler executionScheduler; private final List listeners = new CopyOnWriteArrayList<>(); private final IndicesCleaner runnable; @@ -39,6 +41,7 @@ public class CleanerService extends AbstractLifecycleComponent { CleanerService(Settings settings, ClusterSettings clusterSettings, ThreadPool threadPool, ExecutionScheduler executionScheduler) { this.threadPool = threadPool; + this.genericExecutor = threadPool.generic(); this.executionScheduler = executionScheduler; this.globalRetention = MonitoringField.HISTORY_DURATION.get(settings); this.runnable = new IndicesCleaner(); @@ -54,7 +57,7 @@ public CleanerService(Settings settings, ClusterSettings clusterSettings, Thread @Override protected void doStart() { logger.debug("starting cleaning service"); - threadPool.schedule(runnable, executionScheduler.nextExecutionDelay(ZonedDateTime.now(Clock.systemDefaultZone())), executorName()); + threadPool.schedule(runnable, executionScheduler.nextExecutionDelay(ZonedDateTime.now(Clock.systemDefaultZone())), genericExecutor); logger.debug("cleaning service started"); } @@ -173,7 +176,7 @@ public void onAfter() { logger.debug("scheduling next execution in [{}] seconds", delay.seconds()); try { - cancellable = threadPool.schedule(this, delay, executorName()); + cancellable = threadPool.schedule(this, delay, genericExecutor); } catch (EsRejectedExecutionException e) { if (e.isExecutorShutdown()) { logger.debug("couldn't schedule new execution of the cleaner, executor is shutting down", e); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java index 024a3051e2b19..40a04115173fc 100644 --- 
a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/collector/cluster/ClusterStatsMonitoringDocTests.java @@ -768,7 +768,8 @@ public void testToXContent() throws IOException { "max_index_version":%s } }, - "transport_versions": [] + "transport_versions": [], + "nodes_versions": [] }, "cluster_settings": { "cluster": { diff --git a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java index df22c6d8ea9a6..915935a2d6b24 100644 --- a/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java +++ b/x-pack/plugin/old-lucene-versions/src/main/java/org/elasticsearch/xpack/lucene/bwc/OldLuceneVersions.java @@ -112,7 +112,7 @@ public Collection createComponents( // then marking the feature as used. We do this on each master node so that if one master fails, the // continue reporting usage state. var usageTracker = new ArchiveUsageTracker(getLicenseState(), clusterService::state); - threadPool.scheduleWithFixedDelay(usageTracker, TimeValue.timeValueMinutes(15), ThreadPool.Names.GENERIC); + threadPool.scheduleWithFixedDelay(usageTracker, TimeValue.timeValueMinutes(15), threadPool.generic()); } return List.of(); } diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java index ea4d6b406e749..7a6fba1f04c84 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/GetStackTracesResponse.java @@ -38,20 +38,20 @@ public GetStackTracesResponse(StreamInput in) throws IOException { this.stackTraces = in.readBoolean() ? in.readMap( i -> new StackTrace( - i.readList(StreamInput::readInt), - i.readList(StreamInput::readString), - i.readList(StreamInput::readString), - i.readList(StreamInput::readInt) + i.readCollectionAsList(StreamInput::readInt), + i.readCollectionAsList(StreamInput::readString), + i.readCollectionAsList(StreamInput::readString), + i.readCollectionAsList(StreamInput::readInt) ) ) : null; this.stackFrames = in.readBoolean() ? 
in.readMap( i -> new StackFrame( - i.readList(StreamInput::readString), - i.readList(StreamInput::readString), - i.readList(StreamInput::readInt), - i.readList(StreamInput::readInt) + i.readCollectionAsList(StreamInput::readString), + i.readCollectionAsList(StreamInput::readString), + i.readCollectionAsList(StreamInput::readInt), + i.readCollectionAsList(StreamInput::readInt) ) ) : null; @@ -81,10 +81,10 @@ public GetStackTracesResponse( public void writeTo(StreamOutput out) throws IOException { if (stackTraces != null) { out.writeBoolean(true); - out.writeMap(stackTraces, StreamOutput::writeString, (o, v) -> { + out.writeMap(stackTraces, (o, v) -> { o.writeCollection(v.addressOrLines, StreamOutput::writeInt); - o.writeCollection(v.fileIds, StreamOutput::writeString); - o.writeCollection(v.frameIds, StreamOutput::writeString); + o.writeStringCollection(v.fileIds); + o.writeStringCollection(v.frameIds); o.writeCollection(v.typeIds, StreamOutput::writeInt); }); } else { @@ -92,9 +92,9 @@ public void writeTo(StreamOutput out) throws IOException { } if (stackFrames != null) { out.writeBoolean(true); - out.writeMap(stackFrames, StreamOutput::writeString, (o, v) -> { - o.writeCollection(v.fileName, StreamOutput::writeString); - o.writeCollection(v.functionName, StreamOutput::writeString); + out.writeMap(stackFrames, (o, v) -> { + o.writeStringCollection(v.fileName); + o.writeStringCollection(v.functionName); o.writeCollection(v.functionOffset, StreamOutput::writeInt); o.writeCollection(v.lineNumber, StreamOutput::writeInt); }); @@ -103,13 +103,13 @@ public void writeTo(StreamOutput out) throws IOException { } if (executables != null) { out.writeBoolean(true); - out.writeMap(executables, StreamOutput::writeString, StreamOutput::writeString); + out.writeMap(executables, StreamOutput::writeString); } else { out.writeBoolean(false); } if (stackTraceEvents != null) { out.writeBoolean(true); - out.writeMap(stackTraceEvents, StreamOutput::writeString, StreamOutput::writeInt); + out.writeMap(stackTraceEvents, StreamOutput::writeInt); } else { out.writeBoolean(false); } diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementService.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementService.java index 80e2a265c71c5..29705d9e4b116 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementService.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/async/AsyncTaskManagementService.java @@ -214,7 +214,7 @@ private ActionListener wrapStoringListener( if (acquiredListener != null) { acquiredListener.onResponse(operation.initialResponse(searchTask)); } - }, waitForCompletionTimeout, ThreadPool.Names.SEARCH); + }, waitForCompletionTimeout, threadPool.executor(ThreadPool.Names.SEARCH)); // This will be performed at the end of normal execution return ActionListener.wrap(response -> { ActionListener acquiredListener = exclusiveListener.getAndSet(null); @@ -324,7 +324,7 @@ public static listener.onResponse(new StoredAsyncResponse<>(r, task.getExpirationTimeMillis())), e -> listener.onResponse(new StoredAsyncResponse<>(e, task.getExpirationTimeMillis())) diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/execution/search/extractor/AbstractFieldHitExtractor.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/execution/search/extractor/AbstractFieldHitExtractor.java index f8f20d25df708..8dfe040a839bb 100644 --- 
a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/execution/search/extractor/AbstractFieldHitExtractor.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/execution/search/extractor/AbstractFieldHitExtractor.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.ql.execution.search.extractor; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.document.DocumentField; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -74,7 +74,7 @@ protected AbstractFieldHitExtractor(StreamInput in) throws IOException { String typeName = in.readOptionalString(); dataType = typeName != null ? loadTypeFromName(typeName) : null; hitName = in.readOptionalString(); - if (in.getTransportVersion().before(TransportVersion.V_8_6_0)) { + if (in.getTransportVersion().before(TransportVersions.V_8_6_0)) { this.multiValueSupport = in.readBoolean() ? MultiValueSupport.LENIENT : MultiValueSupport.NONE; } else { this.multiValueSupport = in.readEnum(MultiValueSupport.class); @@ -93,7 +93,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(fieldName); out.writeOptionalString(dataType == null ? null : dataType.typeName()); out.writeOptionalString(hitName); - if (out.getTransportVersion().before(TransportVersion.V_8_6_0)) { + if (out.getTransportVersion().before(TransportVersions.V_8_6_0)) { out.writeBoolean(multiValueSupport != MultiValueSupport.NONE); } else { out.writeEnum(multiValueSupport); diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/NameId.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/NameId.java index e099621756e37..20e8214ddef34 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/NameId.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/NameId.java @@ -26,10 +26,6 @@ public NameId() { this.id = COUNTER.incrementAndGet(); } - public NameId(long id) { - this.id = id; - } - @Override public int hashCode() { return Objects.hash(id); diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/operator/comparison/InProcessor.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/operator/comparison/InProcessor.java index 6b3428e0f364d..b3b0a4844e5c3 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/operator/comparison/InProcessor.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/expression/predicate/operator/comparison/InProcessor.java @@ -26,7 +26,7 @@ public class InProcessor implements Processor { } public InProcessor(StreamInput in) throws IOException { - processsors = in.readNamedWriteableList(Processor.class); + processsors = in.readNamedWriteableCollectionAsList(Processor.class); } @Override @@ -36,7 +36,7 @@ public String getWriteableName() { @Override public final void writeTo(StreamOutput out) throws IOException { - out.writeNamedWriteableList(processsors); + out.writeNamedWriteableCollection(processsors); } @Override diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java index eb37b9256ad34..16d245bb93de5 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java +++ 
b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/IndexResolver.java @@ -59,10 +59,8 @@ import java.util.function.Supplier; import java.util.regex.Pattern; -import static java.util.Arrays.asList; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; -import static java.util.Collections.emptySet; import static org.elasticsearch.action.ActionListener.wrap; import static org.elasticsearch.common.Strings.hasText; import static org.elasticsearch.common.regex.Regex.simpleMatch; @@ -104,52 +102,13 @@ public String toNative() { } } - public static class IndexInfo { - private final String cluster; - private final String name; - private final IndexType type; - - public IndexInfo(String cluster, String name, IndexType type) { - this.cluster = cluster; - this.name = name; - this.type = type; - } - - public String cluster() { - return cluster; - } - - public String name() { - return name; - } - - public IndexType type() { - return type; - } + public record IndexInfo(String cluster, String name, IndexType type) { @Override public String toString() { return buildRemoteIndexName(cluster, name); } - @Override - public int hashCode() { - return Objects.hash(cluster, name, type); - } - - @Override - public boolean equals(Object obj) { - if (this == obj) { - return true; - } - - if (obj == null || getClass() != obj.getClass()) { - return false; - } - - IndexResolver.IndexInfo other = (IndexResolver.IndexInfo) obj; - return Objects.equals(cluster, other.cluster) && Objects.equals(name, other.name) && Objects.equals(type, other.type); - } } public static final String SQL_TABLE = "TABLE"; @@ -173,6 +132,7 @@ public boolean equals(Object obj) { EnumSet.of(WildcardStates.OPEN) ); + public static final Set ALL_FIELDS = Set.of("*"); private static final String UNMAPPED = "unmapped"; private final Client client; @@ -304,6 +264,7 @@ private void resolveRemoteIndices( IndicesOptions indicesOptions = retrieveFrozenIndices ? 
FIELD_CAPS_FROZEN_INDICES_OPTIONS : FIELD_CAPS_INDICES_OPTIONS; FieldCapabilitiesRequest fieldRequest = createFieldCapsRequest( qualifyAndJoinIndices(clusterWildcard, indexWildcards), + ALL_FIELDS, indicesOptions, emptyMap() ); @@ -356,11 +317,12 @@ private boolean clusterIsLocal(String clusterWildcard) { */ public void resolveAsMergedMapping( String indexWildcard, + Set fieldNames, IndicesOptions indicesOptions, Map runtimeMappings, ActionListener listener ) { - FieldCapabilitiesRequest fieldRequest = createFieldCapsRequest(indexWildcard, indicesOptions, runtimeMappings); + FieldCapabilitiesRequest fieldRequest = createFieldCapsRequest(indexWildcard, fieldNames, indicesOptions, runtimeMappings); client.fieldCaps( fieldRequest, listener.delegateFailureAndWrap((l, response) -> l.onResponse(mergedMappings(typeRegistry, indexWildcard, response))) @@ -372,11 +334,12 @@ public void resolveAsMergedMapping( */ public void resolveAsMergedMapping( String indexWildcard, + Set fieldNames, boolean includeFrozen, Map runtimeMappings, ActionListener listener ) { - FieldCapabilitiesRequest fieldRequest = createFieldCapsRequest(indexWildcard, includeFrozen, runtimeMappings); + FieldCapabilitiesRequest fieldRequest = createFieldCapsRequest(indexWildcard, fieldNames, includeFrozen, runtimeMappings); client.fieldCaps( fieldRequest, listener.delegateFailureAndWrap((l, response) -> l.onResponse(mergedMappings(typeRegistry, indexWildcard, response))) @@ -559,11 +522,12 @@ private static EsField createField( private static FieldCapabilitiesRequest createFieldCapsRequest( String index, + Set fieldNames, IndicesOptions indicesOptions, Map runtimeMappings ) { return new FieldCapabilitiesRequest().indices(Strings.commaDelimitedListToStringArray(index)) - .fields("*") + .fields(fieldNames.toArray(String[]::new)) .includeUnmapped(true) .runtimeFields(runtimeMappings) // lenient because we throw our own errors looking at the response e.g. if something was not resolved @@ -573,11 +537,12 @@ private static FieldCapabilitiesRequest createFieldCapsRequest( private static FieldCapabilitiesRequest createFieldCapsRequest( String index, + Set fieldNames, boolean includeFrozen, Map runtimeMappings ) { IndicesOptions indicesOptions = includeFrozen ? 
FIELD_CAPS_FROZEN_INDICES_OPTIONS : FIELD_CAPS_INDICES_OPTIONS; - return createFieldCapsRequest(index, indicesOptions, runtimeMappings); + return createFieldCapsRequest(index, fieldNames, indicesOptions, runtimeMappings); } /** @@ -590,7 +555,7 @@ public void resolveAsSeparateMappings( Map runtimeMappings, ActionListener> listener ) { - FieldCapabilitiesRequest fieldRequest = createFieldCapsRequest(indexWildcard, includeFrozen, runtimeMappings); + FieldCapabilitiesRequest fieldRequest = createFieldCapsRequest(indexWildcard, ALL_FIELDS, includeFrozen, runtimeMappings); client.fieldCaps(fieldRequest, listener.delegateFailureAndWrap((delegate, response) -> { client.admin().indices().getAliases(createGetAliasesRequest(response, includeFrozen), wrap(aliases -> { delegate.onResponse(separateMappings(typeRegistry, javaRegex, response, aliases.getAliases())); @@ -653,129 +618,70 @@ private static List buildIndices( } } - List resolvedIndices = new ArrayList<>(asList(fieldCapsResponse.getIndices())); - int mapSize = CollectionUtils.mapSize(resolvedIndices.size() + resolvedAliases.size()); - Map indices = Maps.newLinkedHashMapWithExpectedSize(mapSize); + Map indices = Maps.newLinkedHashMapWithExpectedSize(fieldCapsResponse.getIndices().length + resolvedAliases.size()); Pattern pattern = javaRegex != null ? Pattern.compile(javaRegex) : null; // sort fields in reverse order to build the field hierarchy - Set>> sortedFields = new TreeSet<>( - Collections.reverseOrder(Comparator.comparing(Entry::getKey)) - ); + TreeMap> sortedFields = new TreeMap<>(Collections.reverseOrder()); final Map> fieldCaps = fieldCapsResponse.get(); - sortedFields.addAll(fieldCaps.entrySet()); - - for (Entry> entry : sortedFields) { + for (Entry> entry : fieldCaps.entrySet()) { String fieldName = entry.getKey(); - if (fieldCapsResponse.isMetadataField(fieldName)) { - // skip metadata field! - continue; + // skip metadata field! + if (fieldCapsResponse.isMetadataField(fieldName) == false) { + sortedFields.put(fieldName, entry.getValue()); } - Map types = new LinkedHashMap<>(entry.getValue()); + } + + for (Entry> entry : sortedFields.entrySet()) { + String fieldName = entry.getKey(); + Map types = entry.getValue(); final InvalidMappedField invalidField = validityVerifier.apply(fieldName, types); // apply verification for fields belonging to index aliases Map invalidFieldsForAliases = getInvalidFieldsForAliases(fieldName, types, aliases); - // filter unmapped - FieldCapabilities unmapped = types.get(UNMAPPED); - Set unmappedIndices = unmapped != null ? 
new HashSet<>(asList(unmapped.indices())) : emptySet(); - // check each type for (Entry typeEntry : types.entrySet()) { + if (UNMAPPED.equals(typeEntry.getKey())) { + continue; + } FieldCapabilities typeCap = typeEntry.getValue(); String[] capIndices = typeCap.indices(); // compute the actual indices - if any are specified, take into account the unmapped indices - List concreteIndices = null; + final String[] concreteIndices; if (capIndices != null) { - if (unmappedIndices.isEmpty()) { - concreteIndices = new ArrayList<>(asList(capIndices)); - } else { - concreteIndices = new ArrayList<>(capIndices.length); - for (String capIndex : capIndices) { - // add only indices that have a mapping - if (unmappedIndices.contains(capIndex) == false) { - concreteIndices.add(capIndex); - } - } - } + concreteIndices = capIndices; } else { - concreteIndices = resolvedIndices; + concreteIndices = fieldCapsResponse.getIndices(); } - // add to the list of concrete indices the aliases associated with these indices Set uniqueAliases = new LinkedHashSet<>(); - if (aliases != null) { - for (String concreteIndex : concreteIndices) { - if (aliases.containsKey(concreteIndex)) { - List concreteIndexAliases = aliases.get(concreteIndex); - concreteIndexAliases.forEach(e -> uniqueAliases.add(e.alias())); + // put the field in their respective mappings and collect the aliases names + for (String index : concreteIndices) { + List concreteIndexAliases = aliases != null ? aliases.get(index) : null; + if (concreteIndexAliases != null) { + for (AliasMetadata e : concreteIndexAliases) { + uniqueAliases.add(e.alias()); } } - concreteIndices.addAll(uniqueAliases); - } - - // put the field in their respective mappings - for (String index : concreteIndices) { - boolean isIndexAlias = uniqueAliases.contains(index); // TODO is split still needed? - if (pattern == null || pattern.matcher(splitQualifiedIndex(index).v2()).matches() || isIndexAlias) { - String indexName = isIndexAlias ? index : indexNameProcessor.apply(index); - Fields indexFields = indices.get(indexName); - if (indexFields == null) { - indexFields = new Fields(); - indices.put(indexName, indexFields); - } + if (pattern == null || pattern.matcher(splitQualifiedIndex(index).v2()).matches()) { + String indexName = indexNameProcessor.apply(index); + Fields indexFields = indices.computeIfAbsent(indexName, k -> new Fields()); EsField field = indexFields.flattedMapping.get(fieldName); - boolean createField = false; - if (isIndexAlias == false) { - if (field == null || (invalidField != null && (field instanceof InvalidMappedField) == false)) { - createField = true; - } - } else { - if (field == null && invalidFieldsForAliases.get(index) == null) { - createField = true; - } - } - - if (createField) { - int dot = fieldName.lastIndexOf('.'); - /* - * Looking up the "tree" at the parent fields here to see if the field is an alias. - * When the upper elements of the "tree" have no elements in fieldcaps, then this is an alias field. But not - * always: if there are two aliases - a.b.c.alias1 and a.b.c.alias2 - only one of them will be considered alias. 
- */ - Holder isAliasFieldType = new Holder<>(false); - if (dot >= 0) { - String parentName = fieldName.substring(0, dot); - if (indexFields.flattedMapping.get(parentName) == null) { - // lack of parent implies the field is an alias - if (fieldCaps.get(parentName) == null) { - isAliasFieldType.set(true); - } - } - } - - createField( - typeRegistry, - fieldName, - fieldCaps, - indexFields.hierarchicalMapping, - indexFields.flattedMapping, - s -> invalidField != null - ? invalidField - : createField( - typeRegistry, - s, - typeCap.getType(), - emptyMap(), - typeCap.isAggregatable(), - isAliasFieldType.get() - ) - ); + if (field == null || (invalidField != null && (field instanceof InvalidMappedField) == false)) { + createField(typeRegistry, fieldName, indexFields, fieldCaps, invalidField, typeCap); } } } + // put the field in their respective mappings by alias name + for (String index : uniqueAliases) { + Fields indexFields = indices.computeIfAbsent(index, k -> new Fields()); + EsField field = indexFields.flattedMapping.get(fieldName); + if (field == null && invalidFieldsForAliases.get(index) == null) { + createField(typeRegistry, fieldName, indexFields, fieldCaps, invalidField, typeCap); + } + } } } @@ -788,6 +694,43 @@ private static List buildIndices( return foundIndices; } + private static void createField( + DataTypeRegistry typeRegistry, + String fieldName, + Fields indexFields, + Map> fieldCaps, + InvalidMappedField invalidField, + FieldCapabilities typeCap + ) { + int dot = fieldName.lastIndexOf('.'); + /* + * Looking up the "tree" at the parent fields here to see if the field is an alias. + * When the upper elements of the "tree" have no elements in fieldcaps, then this is an alias field. But not + * always: if there are two aliases - a.b.c.alias1 and a.b.c.alias2 - only one of them will be considered alias. + */ + Holder isAliasFieldType = new Holder<>(false); + if (dot >= 0) { + String parentName = fieldName.substring(0, dot); + if (indexFields.flattedMapping.get(parentName) == null) { + // lack of parent implies the field is an alias + if (fieldCaps.get(parentName) == null) { + isAliasFieldType.set(true); + } + } + } + + createField( + typeRegistry, + fieldName, + fieldCaps, + indexFields.hierarchicalMapping, + indexFields.flattedMapping, + s -> invalidField != null + ? invalidField + : createField(typeRegistry, s, typeCap.getType(), emptyMap(), typeCap.isAggregatable(), isAliasFieldType.get()) + ); + } + /* * Checks if the field is valid (same type and same capabilities - searchable/aggregatable) across indices belonging to a list * of aliases. 
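// --- Editor's note (illustration, not part of the patch) -------------------
// The IndexResolver refactoring above threads an explicit fieldNames set
// through to the field_caps call instead of always requesting every field
// with "*". A minimal sketch of the request construction, using only the
// builder calls visible in the hunks above; the class and method names are
// hypothetical, and the indices-options/lenient settings applied by the real
// createFieldCapsRequest are omitted for brevity:
import org.elasticsearch.action.fieldcaps.FieldCapabilitiesRequest;
import org.elasticsearch.common.Strings;

import java.util.Map;
import java.util.Set;

class FieldCapsRequestSketch {
    // mirrors the ALL_FIELDS constant introduced above; callers that do not
    // prune fields keep the old "*" behaviour by passing it explicitly
    static final Set<String> ALL_FIELDS = Set.of("*");

    static FieldCapabilitiesRequest fieldCapsRequest(String indexWildcard, Set<String> fieldNames, Map<String, Object> runtimeMappings) {
        return new FieldCapabilitiesRequest().indices(Strings.commaDelimitedListToStringArray(indexWildcard))
            .fields(fieldNames.toArray(String[]::new)) // previously hard-coded to "*"
            .includeUnmapped(true)
            .runtimeFields(runtimeMappings);
    }
}
// --- end editor's note ------------------------------------------------------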
diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/VersionCompatibilityChecks.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/VersionCompatibilityChecks.java index 532c466e63a7f..e194f385d1606 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/VersionCompatibilityChecks.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/index/VersionCompatibilityChecks.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.ql.index; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.core.Nullable; import org.elasticsearch.xpack.ql.type.DataType; @@ -20,7 +21,7 @@ public final class VersionCompatibilityChecks { public static final Version INTRODUCING_UNSIGNED_LONG = V_8_2_0; - public static final TransportVersion INTRODUCING_UNSIGNED_LONG_TRANSPORT = TransportVersion.V_8_2_0; + public static final TransportVersion INTRODUCING_UNSIGNED_LONG_TRANSPORT = TransportVersions.V_8_2_0; public static final Version INTRODUCING_VERSION_FIELD_TYPE = V_8_4_0; private VersionCompatibilityChecks() {} diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/optimizer/OptimizerRules.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/optimizer/OptimizerRules.java index 3d5976223a667..76cf1b6690335 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/optimizer/OptimizerRules.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/optimizer/OptimizerRules.java @@ -1242,13 +1242,9 @@ protected Expression rule(Or or) { if (found.isEmpty() == false) { // combine equals alongside the existing ors final ZoneId finalZoneId = zoneId; - found.forEach((k, v) -> { - ors.add( - v.size() == 1 - ? new Equals(k.source(), k, v.iterator().next(), finalZoneId) - : createIn(k, new ArrayList<>(v), finalZoneId) - ); - }); + found.forEach( + (k, v) -> { ors.add(v.size() == 1 ? createEquals(k, v, finalZoneId) : createIn(k, new ArrayList<>(v), finalZoneId)); } + ); Expression combineOr = combineOr(ors); // check the result semantically since the result might different in order @@ -1262,6 +1258,10 @@ protected Expression rule(Or or) { return e; } + protected Equals createEquals(Expression k, Set v, ZoneId finalZoneId) { + return new Equals(k.source(), k, v.iterator().next(), finalZoneId); + } + protected In createIn(Expression key, List values, ZoneId zoneId) { return new In(key.source(), key, values, zoneId); } diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/DateUtils.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/DateUtils.java index c369f129acff5..be13511c92ed4 100644 --- a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/DateUtils.java +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/type/DateUtils.java @@ -52,6 +52,10 @@ public static ZonedDateTime asDateTime(long millis) { return ZonedDateTime.ofInstant(Instant.ofEpochMilli(millis), UTC); } + public static long asMillis(ZonedDateTime zonedDateTime) { + return zonedDateTime.toInstant().toEpochMilli(); + } + /** * Parses the given string into a DateTime using UTC as a default timezone. 
*/ diff --git a/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/Queries.java b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/Queries.java new file mode 100644 index 0000000000000..7bc1e3daf5aa2 --- /dev/null +++ b/x-pack/plugin/ql/src/main/java/org/elasticsearch/xpack/ql/util/Queries.java @@ -0,0 +1,77 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.ql.util; + +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; + +import java.util.List; +import java.util.function.Function; + +import static org.elasticsearch.index.query.QueryBuilders.boolQuery; + +/** + * Utilities for Elasticsearch queries. + */ +public class Queries { + + public enum Clause { + FILTER(BoolQueryBuilder::filter), + MUST(BoolQueryBuilder::must), + MUST_NOT(BoolQueryBuilder::mustNot), + SHOULD(BoolQueryBuilder::should); + + final Function> innerQueries; + + Clause(Function> innerQueries) { + this.innerQueries = innerQueries; + } + } + + /** + * Combines the given queries while attempting to NOT create a new bool query and avoid + * unnecessary nested queries. + * The method tries to detect if the first query is a bool query - if that is the case it will + * reuse that for adding the rest of the clauses. + */ + public static QueryBuilder combine(Clause clause, List queries) { + QueryBuilder firstQuery = null; + BoolQueryBuilder bool = null; + + for (QueryBuilder query : queries) { + if (query == null) { + continue; + } + if (firstQuery == null) { + firstQuery = query; + if (firstQuery instanceof BoolQueryBuilder bqb) { + bool = bqb; + } + } + // at least two entries, start copying + else { + // lazy init the root bool + if (bool == null) { + bool = combine(clause, boolQuery(), firstQuery); + } + // keep adding queries to it + bool = combine(clause, bool, query); + } + } + + return bool == null ? firstQuery : bool; + } + + private static BoolQueryBuilder combine(Clause clause, BoolQueryBuilder bool, QueryBuilder query) { + var list = clause.innerQueries.apply(bool); + if (list.contains(query) == false) { + list.add(query); + } + return bool; + } +} diff --git a/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/util/QueriesTests.java b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/util/QueriesTests.java new file mode 100644 index 0000000000000..2c19a2a4f03ce --- /dev/null +++ b/x-pack/plugin/ql/src/test/java/org/elasticsearch/xpack/ql/util/QueriesTests.java @@ -0,0 +1,129 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.ql.util; + +import org.elasticsearch.index.query.BoolQueryBuilder; +import org.elasticsearch.index.query.QueryBuilder; +import org.elasticsearch.index.query.QueryBuilders; +import org.elasticsearch.test.ESTestCase; + +import static java.util.Arrays.asList; +import static org.hamcrest.Matchers.everyItem; +import static org.hamcrest.Matchers.in; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.sameInstance; + +public class QueriesTests extends ESTestCase { + + private static QueryBuilder randomNonBoolQuery() { + return randomFrom( + random(), + QueryBuilders::matchAllQuery, + QueryBuilders::idsQuery, + () -> QueryBuilders.rangeQuery(randomRealisticUnicodeOfLength(5)), + () -> QueryBuilders.termQuery(randomAlphaOfLength(5), randomAlphaOfLength(5)), + () -> QueryBuilders.existsQuery(randomAlphaOfLength(5)), + () -> QueryBuilders.geoBoundingBoxQuery(randomAlphaOfLength(5)) + ); + } + + private static BoolQueryBuilder randomBoolQuery() { + var bool = QueryBuilders.boolQuery(); + if (randomBoolean()) { + bool.filter(randomNonBoolQuery()); + } + if (randomBoolean()) { + bool.must(randomNonBoolQuery()); + } + if (randomBoolean()) { + bool.mustNot(randomNonBoolQuery()); + } + if (randomBoolean()) { + bool.should(randomNonBoolQuery()); + } + return bool; + } + + public void testCombineNotCreatingBool() { + var clause = randomFrom(Queries.Clause.values()); + var nonBool = randomNonBoolQuery(); + assertThat(nonBool, sameInstance(Queries.combine(clause, asList(null, null, nonBool, null)))); + } + + public void testCombineNonBoolQueries() { + var queries = randomArray(2, 10, QueryBuilder[]::new, QueriesTests::randomNonBoolQuery); + + var clause = randomFrom(Queries.Clause.values()); + var list = asList(queries); + var combination = Queries.combine(clause, list); + + assertThat(combination, instanceOf(BoolQueryBuilder.class)); + var bool = (BoolQueryBuilder) combination; + var clauseList = clause.innerQueries.apply(bool); + assertThat(list, everyItem(in(clauseList))); + } + + public void testCombineBoolQueries() { + var queries = randomArray(2, 10, QueryBuilder[]::new, () -> { + var bool = QueryBuilders.boolQuery(); + if (randomBoolean()) { + bool.filter(randomNonBoolQuery()); + } + if (randomBoolean()) { + bool.must(randomNonBoolQuery()); + } + if (randomBoolean()) { + bool.mustNot(randomNonBoolQuery()); + } + if (randomBoolean()) { + bool.should(randomNonBoolQuery()); + } + return bool; + }); + + var clause = randomFrom(Queries.Clause.values()); + var list = asList(queries); + var combination = Queries.combine(clause, list); + + assertThat(combination, instanceOf(BoolQueryBuilder.class)); + var bool = (BoolQueryBuilder) combination; + + var clauseList = clause.innerQueries.apply(bool); + + for (QueryBuilder query : queries) { + if (query != bool) { + assertThat(query, in(clauseList)); + } + } + } + + public void testCombineMixedBoolAndNonBoolQueries() { + var queries = randomArray(2, 10, QueryBuilder[]::new, () -> { + if (randomBoolean()) { + return QueriesTests.randomBoolQuery(); + } else { + return QueriesTests.randomNonBoolQuery(); + } + }); + + var clause = randomFrom(Queries.Clause.values()); + var list = asList(queries); + var combination = Queries.combine(clause, list); + + assertThat(combination, instanceOf(BoolQueryBuilder.class)); + var bool = (BoolQueryBuilder) combination; + + var clauseList = clause.innerQueries.apply(bool); + + for (QueryBuilder query : queries) { + if (query != bool) { + assertThat(query, 
in(clauseList)); + } + } + } +} diff --git a/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java b/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java index 1f2db8dd58f57..240df6ecb0227 100644 --- a/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java +++ b/x-pack/plugin/ql/test-fixtures/src/main/java/org/elasticsearch/xpack/ql/CsvSpecReader.java @@ -61,6 +61,8 @@ public Object parse(String line) { // read data if (line.toLowerCase(Locale.ROOT).startsWith("warning:")) { testCase.expectedWarnings.add(line.substring("warning:".length()).trim()); + } else if (line.toLowerCase(Locale.ROOT).startsWith("ignoreorder:")) { + testCase.ignoreOrder = Boolean.parseBoolean(line.substring("ignoreOrder:".length()).trim()); } else if (line.startsWith(";")) { testCase.expectedResults = data.toString(); // clean-up and emit @@ -83,6 +85,7 @@ public static class CsvTestCase { public String earlySchema; public String expectedResults; public List expectedWarnings = new ArrayList<>(); + public boolean ignoreOrder; } } diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankBuilder.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankBuilder.java index 90900644ea294..f0a8e299de632 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankBuilder.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankBuilder.java @@ -9,6 +9,7 @@ import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.license.LicenseUtils; @@ -86,7 +87,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_8_0; + return TransportVersions.V_8_8_0; } public int rankConstant() { diff --git a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardResult.java b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardResult.java index 52f808fe8bb35..18a6e041d7873 100644 --- a/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardResult.java +++ b/x-pack/plugin/rank-rrf/src/main/java/org/elasticsearch/xpack/rank/rrf/RRFRankShardResult.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.rank.rrf; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.rank.RankShardResult; @@ -48,7 +49,7 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_8_0; + return TransportVersions.V_8_8_0; } @Override diff --git a/x-pack/plugin/repositories-metering-api/src/main/java/org/elasticsearch/xpack/repositories/metering/action/RepositoriesMeteringResponse.java b/x-pack/plugin/repositories-metering-api/src/main/java/org/elasticsearch/xpack/repositories/metering/action/RepositoriesMeteringResponse.java index 209f9a4c00278..811e23c0b44bb 100644 --- a/x-pack/plugin/repositories-metering-api/src/main/java/org/elasticsearch/xpack/repositories/metering/action/RepositoriesMeteringResponse.java +++ 
b/x-pack/plugin/repositories-metering-api/src/main/java/org/elasticsearch/xpack/repositories/metering/action/RepositoriesMeteringResponse.java @@ -34,12 +34,12 @@ public RepositoriesMeteringResponse( @Override protected List readNodesFrom(StreamInput in) throws IOException { - return in.readList(RepositoriesNodeMeteringResponse::new); + return in.readCollectionAsList(RepositoriesNodeMeteringResponse::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } @Override diff --git a/x-pack/plugin/repositories-metering-api/src/main/java/org/elasticsearch/xpack/repositories/metering/action/RepositoriesNodeMeteringResponse.java b/x-pack/plugin/repositories-metering-api/src/main/java/org/elasticsearch/xpack/repositories/metering/action/RepositoriesNodeMeteringResponse.java index 31d7486a72dbb..106740dcece62 100644 --- a/x-pack/plugin/repositories-metering-api/src/main/java/org/elasticsearch/xpack/repositories/metering/action/RepositoriesNodeMeteringResponse.java +++ b/x-pack/plugin/repositories-metering-api/src/main/java/org/elasticsearch/xpack/repositories/metering/action/RepositoriesNodeMeteringResponse.java @@ -30,7 +30,7 @@ public RepositoriesNodeMeteringResponse(DiscoveryNode node, List jobs) { RollupIndexCaps(StreamInput in) throws IOException { this.rollupIndexName = in.readString(); - this.jobCaps = in.readList(RollupJobCaps::new); + this.jobCaps = in.readCollectionAsList(RollupJobCaps::new); } protected List getJobCaps() { @@ -178,7 +178,7 @@ static RollupIndexCaps parseMetadataXContent(BytesReference source, String index @Override public void writeTo(StreamOutput out) throws IOException { out.writeString(rollupIndexName); - out.writeList(jobCaps); + out.writeCollection(jobCaps); } @Override diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java index 5e1e08d74d324..dd78eb95e7f3d 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportRollupSearchAction.java @@ -270,10 +270,9 @@ private static SearchSourceBuilder copyWriteable( NamedWriteableRegistry namedWriteableRegistry, Writeable.Reader reader ) throws IOException { - Writeable.Writer writer = (out, value) -> value.writeTo(out); try (BytesStreamOutput output = new BytesStreamOutput()) { output.setTransportVersion(TransportVersion.current()); - writer.write(output, original); + original.writeTo(output); try (StreamInput in = new NamedWriteableAwareStreamInput(output.bytes().streamInput(), namedWriteableRegistry)) { in.setTransportVersion(TransportVersion.current()); return reader.read(in); diff --git a/x-pack/plugin/search-business-rules/src/internalClusterTest/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java b/x-pack/plugin/search-business-rules/src/internalClusterTest/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java index 913a6df734f27..571f75a3ca648 100644 --- a/x-pack/plugin/search-business-rules/src/internalClusterTest/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java +++ b/x-pack/plugin/search-business-rules/src/internalClusterTest/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilderIT.java @@ -395,4 +395,67 @@ public void 
testMultiIndexWithAliases() throws Exception {
         assertSecondHit(searchResponse, both(hasIndex("test")).and(hasId("a")));
         assertThirdHit(searchResponse, both(hasIndex("test")).and(hasId("c")));
     }
+
+    public void testMultiIndexWithAliasesAndDuplicateIds() throws Exception {
+        Alias alias = new Alias("test-alias");
+
+        assertAcked(
+            prepareCreate("test1").setMapping(
+                jsonBuilder().startObject()
+                    .startObject("_doc")
+                    .startObject("properties")
+                    .startObject("field1")
+                    .field("analyzer", "whitespace")
+                    .field("type", "text")
+                    .endObject()
+                    .endObject()
+                    .endObject()
+                    .endObject()
+            ).setSettings(Settings.builder().put(indexSettings()).put("index.number_of_shards", randomIntBetween(2, 5))).addAlias(alias)
+        );
+
+        assertAcked(
+            prepareCreate("test2").setMapping(
+                jsonBuilder().startObject()
+                    .startObject("_doc")
+                    .startObject("properties")
+                    .startObject("field1")
+                    .field("analyzer", "whitespace")
+                    .field("type", "text")
+                    .endObject()
+                    .endObject()
+                    .endObject()
+                    .endObject()
+            ).setSettings(Settings.builder().put(indexSettings()).put("index.number_of_shards", randomIntBetween(2, 5))).addAlias(alias)
+        );
+
+        client().prepareIndex("test1").setId("a").setSource("field1", "document a").get();
+        client().prepareIndex("test1").setId("b").setSource("field1", "document b").get();
+        client().prepareIndex("test1").setId("c").setSource("field1", "document c").get();
+
+        client().prepareIndex("test2").setId("a").setSource("field1", "document a").get();
+
+        refresh();
+
+        PinnedQueryBuilder pqb = new PinnedQueryBuilder(
+            QueryBuilders.queryStringQuery("document"),
+            new Item("test1", "b"),
+            new Item(null, "a"),
+            new Item("test1", "c"),
+            new Item("test1", "a"),
+            new Item("test-alias", "a")
+        );
+
+        SearchResponse searchResponse = client().prepareSearch()
+            .setQuery(pqb)
+            .setTrackTotalHits(true)
+            .setSearchType(DFS_QUERY_THEN_FETCH)
+            .get();
+
+        assertHitCount(searchResponse, 4);
+        assertFirstHit(searchResponse, both(hasIndex("test1")).and(hasId("b")));
+        assertSecondHit(searchResponse, hasId("a"));
+        assertThirdHit(searchResponse, hasId("a"));
+        assertFourthHit(searchResponse, both(hasIndex("test1")).and(hasId("c")));
+    }
 }
diff --git a/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilder.java b/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilder.java
index 1d8e5b6c6a132..1db5d020fc0ee 100644
--- a/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilder.java
+++ b/x-pack/plugin/search-business-rules/src/main/java/org/elasticsearch/xpack/searchbusinessrules/PinnedQueryBuilder.java
@@ -14,6 +14,7 @@
 import org.apache.lucene.search.Query;
 import org.apache.lucene.util.NumericUtils;
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -56,6 +57,8 @@ public class PinnedQueryBuilder extends AbstractQueryBuilder<PinnedQueryBuilder>
     public static final ParseField DOCS_FIELD = new ParseField("docs");
     public static final ParseField ORGANIC_QUERY_FIELD = new ParseField("organic");

+    private static final TransportVersion OPTIONAL_INDEX_IN_DOCS_VERSION = TransportVersions.V_8_500_071;
+
     private final List<String> ids;
     private final List<Item> docs;
     private QueryBuilder organicQuery;
@@ -83,10 +86,7 @@ public static final class Item implements ToXContentObject, Writeable {
          * @param id and its id
          */
         public Item(String index, String id) {
-            if (index == null) {
-                throw new IllegalArgumentException("Item requires index to be non-null");
-            }
-            if (Regex.isSimpleMatchPattern(index)) {
+            if (index != null && Regex.isSimpleMatchPattern(index)) {
                 throw new IllegalArgumentException("Item index cannot contain wildcard expressions");
             }
             if (id == null) {
@@ -105,20 +105,35 @@ private Item(String id) {
          * Read from a stream.
          */
         public Item(StreamInput in) throws IOException {
-            index = in.readString();
+            if (in.getTransportVersion().onOrAfter(OPTIONAL_INDEX_IN_DOCS_VERSION)) {
+                index = in.readOptionalString();
+            } else {
+                index = in.readString();
+            }
             id = in.readString();
         }

         @Override
         public void writeTo(StreamOutput out) throws IOException {
-            out.writeString(index);
+            if (out.getTransportVersion().onOrAfter(OPTIONAL_INDEX_IN_DOCS_VERSION)) {
+                out.writeOptionalString(index);
+            } else {
+                if (index == null) {
+                    throw new IllegalArgumentException(
+                        "[_index] needs to be specified for docs elements when cluster nodes are not in the same version"
+                    );
+                }
+                out.writeString(index);
+            }
             out.writeString(id);
         }

         @Override
         public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
             builder.startObject();
-            builder.field(INDEX_FIELD.getPreferredName(), this.index);
+            if (this.index != null) {
+                builder.field(INDEX_FIELD.getPreferredName(), this.index);
+            }
             builder.field(ID_FIELD.getPreferredName(), this.id);
             return builder.endObject();
         }
@@ -129,7 +144,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
         );

         static {
-            PARSER.declareString(constructorArg(), INDEX_FIELD);
+            PARSER.declareString(optionalConstructorArg(), INDEX_FIELD);
             PARSER.declareString(constructorArg(), ID_FIELD);
         }

@@ -219,19 +234,19 @@ private PinnedQueryBuilder(QueryBuilder organicQuery, List ids, List new Item(randomAlphaOfLength(64), randomAlphaOfLength(256)));
+        return randomArray(1, 100, Item[]::new, () -> new Item(randomBoolean() ? null : randomAlphaOfLength(64), randomAlphaOfLength(256)));
     }

     @Override
diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java
index 961118d0ab84e..1cdfa51bdf776 100644
--- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java
+++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/SearchableSnapshotsPrewarmingIntegTests.java
@@ -65,6 +65,7 @@
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashMap;
+import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -461,6 +462,11 @@ public BlobContainer blobContainer(BlobPath path) {
             return new TrackingFilesBlobContainer(delegate.blobContainer(path));
         }

+        @Override
+        public void deleteBlobsIgnoringIfNotExists(Iterator<String> blobNames) throws IOException {
+            delegate.deleteBlobsIgnoringIfNotExists(blobNames);
+        }
+
         @Override
         public void close() throws IOException {
             delegate.close();
diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java
index 35c7e703bf008..f7a1d3e42036a 100644
--- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java
+++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java
@@ -370,7 +370,7 @@ public Collection<Object> createComponents(
             // then marking the feature as used. We do this on each master node so that if one master fails, the next one can
             // continue reporting usage state.
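The `PinnedQueryBuilder.Item` change above is an instance of the usual transport-version-gated pattern for making a previously required field optional on the wire. A minimal sketch of that pattern, distilled from the diff (the class name and `gate` parameter here are illustrative, not the PR's code; `readOptionalString`/`writeOptionalString` are the real `StreamInput`/`StreamOutput` methods it relies on):

    import java.io.IOException;

    import org.elasticsearch.TransportVersion;
    import org.elasticsearch.common.io.stream.StreamInput;
    import org.elasticsearch.common.io.stream.StreamOutput;

    final class OptionalFieldWire {
        // Write side: peers on or after the gate accept a nullable value,
        // older peers still require a concrete string.
        static void write(StreamOutput out, TransportVersion gate, String value) throws IOException {
            if (out.getTransportVersion().onOrAfter(gate)) {
                out.writeOptionalString(value);
            } else if (value != null) {
                out.writeString(value);
            } else {
                throw new IllegalArgumentException("value must be set for older nodes");
            }
        }

        // Read side mirrors the write side, gated on the sender's transport version.
        static String read(StreamInput in, TransportVersion gate) throws IOException {
            return in.getTransportVersion().onOrAfter(gate) ? in.readOptionalString() : in.readString();
        }
    }

Throwing on the old-wire path rather than inventing a default keeps mixed-version clusters from silently serializing a value the receiver would misinterpret, which matches the error message in `Item#writeTo` above.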
var usageTracker = new SearchableSnapshotsUsageTracker(getLicenseState(), clusterService::state); - threadPool.scheduleWithFixedDelay(usageTracker, TimeValue.timeValueMinutes(15), ThreadPool.Names.GENERIC); + threadPool.scheduleWithFixedDelay(usageTracker, TimeValue.timeValueMinutes(15), threadPool.generic()); } this.allocator.set(new SearchableSnapshotAllocator(client, clusterService.getRerouteService(), frozenCacheInfoService)); diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsStatsResponse.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsStatsResponse.java index 1270d02325a45..c5e7cec7e5cca 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsStatsResponse.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsStatsResponse.java @@ -35,7 +35,7 @@ public class SearchableSnapshotsStatsResponse extends BroadcastResponse { SearchableSnapshotsStatsResponse(StreamInput in) throws IOException { super(in); - this.stats = in.readList(SearchableSnapshotShardStats::new); + this.stats = in.readCollectionAsList(SearchableSnapshotShardStats::new); } SearchableSnapshotsStatsResponse( @@ -73,7 +73,7 @@ private static List computeCompound(Stream readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeCacheFilesMetadata::new); + return in.readCollectionAsList(NodeCacheFilesMetadata::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } } } diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/cache/TransportSearchableSnapshotsNodeCachesStatsAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/cache/TransportSearchableSnapshotsNodeCachesStatsAction.java index e85ea6948292f..9a12661cc2a67 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/cache/TransportSearchableSnapshotsNodeCachesStatsAction.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/cache/TransportSearchableSnapshotsNodeCachesStatsAction.java @@ -289,12 +289,12 @@ public NodesCachesStatsResponse(ClusterName clusterName, List readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeCachesStatsResponse::new); + return in.readCollectionAsList(NodeCachesStatsResponse::new); } @Override protected void writeNodesTo(StreamOutput out, List nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } @Override diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java index b20327b3a4159..1b09ea10d36a8 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/blob/BlobStoreCacheMaintenanceService.java @@ -219,7 +219,7 
@@ private synchronized void startPeriodicTask() { final TimeValue delay = periodicTaskInterval; if (delay.getMillis() > 0L) { final PeriodicMaintenanceTask task = new PeriodicMaintenanceTask(periodicTaskKeepAlive, periodicTaskBatchSize); - periodicTask = threadPool.schedule(task, delay, ThreadPool.Names.GENERIC); + periodicTask = threadPool.schedule(task, delay, threadPool.generic()); } else { periodicTask = null; } diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/CacheService.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/CacheService.java index 3daa72efdd608..db56addb434c7 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/CacheService.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/full/CacheService.java @@ -636,7 +636,7 @@ public void synchronizeCache() { class CacheSynchronizationTask extends AbstractAsyncTask { CacheSynchronizationTask(ThreadPool threadPool, TimeValue interval) { - super(logger, Objects.requireNonNull(threadPool), Objects.requireNonNull(interval), true); + super(logger, Objects.requireNonNull(threadPool), threadPool.generic(), Objects.requireNonNull(interval), true); } @Override @@ -649,11 +649,6 @@ public void runInternal() { synchronizeCache(); } - @Override - protected String getThreadPool() { - return ThreadPool.Names.GENERIC; - } - @Override public String toString() { return "cache_synchronization_task"; diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/FrozenCacheInfoService.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/FrozenCacheInfoService.java index 936b47856e7ab..cd21f8cbd9a44 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/FrozenCacheInfoService.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/shared/FrozenCacheInfoService.java @@ -15,8 +15,8 @@ import org.elasticsearch.cluster.routing.RerouteService; import org.elasticsearch.common.Priority; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.searchablesnapshots.action.cache.FrozenCacheInfoAction; import org.elasticsearch.xpack.searchablesnapshots.action.cache.FrozenCacheInfoResponse; @@ -136,7 +136,7 @@ private void retryOrRecordFailure(Exception e) { logger.debug(() -> format("failed to retrieve node settings from node %s, shouldRetry=%s", discoveryNode, shouldRetry), e); if (shouldRetry) { // failure is likely something like a CircuitBreakingException, so there's no sense in an immediate retry - client.threadPool().scheduleUnlessShuttingDown(TimeValue.timeValueSeconds(1), ThreadPool.Names.SAME, AsyncNodeFetch.this); + client.threadPool().scheduleUnlessShuttingDown(TimeValue.timeValueSeconds(1), EsExecutors.DIRECT_EXECUTOR_SERVICE, this); } else { updateEntry(NodeState.FAILED); } diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java 
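The scheduling edits in `SearchableSnapshots`, `BlobStoreCacheMaintenanceService`, `CacheService`, and `FrozenCacheInfoService` are all the same mechanical migration: the `ThreadPool` scheduling methods now take an `Executor` instead of a thread-pool name string. A before/after sketch under that assumption (a `ThreadPool` instance in scope, names as they appear in this diff):

    import org.elasticsearch.common.util.concurrent.EsExecutors;
    import org.elasticsearch.core.TimeValue;
    import org.elasticsearch.threadpool.ThreadPool;

    class SchedulingMigrationSketch {
        void schedule(ThreadPool threadPool, Runnable task) {
            // before: threadPool.schedule(task, TimeValue.timeValueSeconds(30), ThreadPool.Names.GENERIC);
            threadPool.schedule(task, TimeValue.timeValueSeconds(30), threadPool.generic());

            // ThreadPool.Names.SAME (run on the calling thread) maps to the direct executor:
            threadPool.schedule(task, TimeValue.timeValueSeconds(30), EsExecutors.DIRECT_EXECUTOR_SERVICE);
        }
    }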
b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java index ea9ed833dfe64..e1531e4e8342e 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInput.java @@ -25,7 +25,7 @@ import java.io.InputStream; import java.nio.ByteBuffer; -public class FrozenIndexInput extends MetadataCachingIndexInput { +public final class FrozenIndexInput extends MetadataCachingIndexInput { private static final Logger logger = LogManager.getLogger(FrozenIndexInput.class); @@ -94,6 +94,14 @@ private FrozenIndexInput( this.cacheFile = cacheFile.copy(); } + /** + * Clone constructor, will mark this as cloned. + */ + private FrozenIndexInput(FrozenIndexInput input) { + super(input); + this.cacheFile = input.cacheFile.copy(); + } + @Override protected void readWithoutBlobCache(ByteBuffer b) throws Exception { final long position = getAbsolutePosition(); @@ -196,4 +204,13 @@ protected MetadataCachingIndexInput doSlice( ); } + @Override + public FrozenIndexInput clone() { + return new FrozenIndexInput(this); + } + + // for tests only + SharedBlobCacheService.CacheFile cacheFile() { + return cacheFile; + } } diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/MetadataCachingIndexInput.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/MetadataCachingIndexInput.java index b0ff6b5718288..c4fa5efc1c012 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/MetadataCachingIndexInput.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/store/input/MetadataCachingIndexInput.java @@ -39,6 +39,7 @@ import java.io.IOException; import java.io.InputStream; +import java.io.UncheckedIOException; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.util.Objects; @@ -145,6 +146,35 @@ protected MetadataCachingIndexInput( assert getBufferSize() <= BlobStoreCacheService.DEFAULT_CACHED_BLOB_SIZE; // must be able to cache at least one buffer's worth } + /** + * Clone constructor, will mark this as cloned. + */ + protected MetadataCachingIndexInput(MetadataCachingIndexInput input) { + this( + input.logger, + "(clone of) " + input, + input.directory, + input.fileInfo, + input.context, + input.stats, + input.offset, + input.compoundFileOffset, + input.length, + input.cacheFileReference, + input.defaultRangeSize, + input.recoveryRangeSize, + input.headerBlobCacheByteRange, + input.footerBlobCacheByteRange + ); + this.isClone = true; + try { + seek(input.getFilePointer()); + } catch (IOException e) { + assert false : e; + throw new UncheckedIOException(e); + } + } + /** * Detects read operations that are executed on the last 16 bytes of the index input which is where Lucene stores the footer checksum * of Lucene files. 
If such a read is detected this method tries to complete the read operation by reading the checksum from the diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryTests.java index b5726a4cb7367..97e5ad3ffec4a 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/SearchableSnapshotDirectoryTests.java @@ -972,7 +972,7 @@ private static IndexSettings newIndexSettings() { "_index", Settings.builder() .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random())) - .put(IndexMetadata.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT) + .put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()) .build() ); } diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/DirectBlobContainerIndexInputTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/DirectBlobContainerIndexInputTests.java index 805481862fc30..9fc95c137976c 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/DirectBlobContainerIndexInputTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/DirectBlobContainerIndexInputTests.java @@ -142,6 +142,25 @@ public void testRandomReads() throws IOException { } } + public void testCloneAndLargeRead() throws IOException { + final Tuple bytes = randomChecksumBytes(between(ByteSizeUnit.KB.toIntBytes(2), ByteSizeUnit.KB.toIntBytes(10))); + try (var indexInput = createIndexInput(bytes)) { + indexInput.readLong(); + + final var clone = indexInput.clone(); + + // do a read which is large enough to exercise the path which bypasses the buffer and fills the output directly + + final var originalBytes = new byte[2048]; + indexInput.readBytes(originalBytes, 0, originalBytes.length); + + final var cloneBytes = new byte[originalBytes.length]; + clone.readBytes(cloneBytes, 0, cloneBytes.length); + + assertArrayEquals(originalBytes, cloneBytes); + } + } + public void testRandomOverflow() throws IOException { for (int i = 0; i < 100; i++) { final Tuple bytes = randomChecksumBytes(randomIntBetween(1, 1000)); diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java index d0da8573affbd..f6f494cd46099 100644 --- a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/store/input/FrozenIndexInputTests.java @@ -41,7 +41,9 @@ import static org.elasticsearch.core.IOUtils.WINDOWS; import static org.elasticsearch.xpack.searchablesnapshots.cache.full.CacheService.resolveSnapshotCache; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.not; public class 
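The clone constructors added to `FrozenIndexInput` and `MetadataCachingIndexInput`, together with `testCloneAndLargeRead` above, pin down the Lucene `IndexInput#clone` contract: a clone starts at the parent's current file pointer, reads the same bytes, and then advances independently. A usage sketch of the behavior these tests verify (illustrative helper, not code from the PR):

    import java.io.IOException;
    import java.util.Arrays;

    import org.apache.lucene.store.IndexInput;

    class CloneContractSketch {
        static void assertCloneReadsSameBytes(IndexInput input) throws IOException {
            input.readLong();                    // advance the parent a little
            IndexInput clone = input.clone();    // clone starts at the parent's position
            byte[] fromParent = new byte[128];
            byte[] fromClone = new byte[128];
            input.readBytes(fromParent, 0, fromParent.length);
            clone.readBytes(fromClone, 0, fromClone.length);
            assert Arrays.equals(fromParent, fromClone); // same data, independent pointers
        }
    }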
FrozenIndexInputTests extends AbstractSearchableSnapshotsTestCase { @@ -130,6 +132,13 @@ public void testRandomReads() throws IOException { final byte[] result = randomReadAndSlice(indexInput, fileData.length); assertArrayEquals(fileData, result); + + // validate clone copies cache file object + indexInput.seek(randomLongBetween(0, fileData.length - 1)); + FrozenIndexInput clone = (FrozenIndexInput) indexInput.clone(); + assertThat(clone.cacheFile(), not(equalTo(((FrozenIndexInput) indexInput).cacheFile()))); + assertThat(clone.getFilePointer(), equalTo(indexInput.getFilePointer())); + indexInput.close(); } } diff --git a/x-pack/plugin/security/build.gradle b/x-pack/plugin/security/build.gradle index 6ee43268b8746..fc5774ccbdb3a 100644 --- a/x-pack/plugin/security/build.gradle +++ b/x-pack/plugin/security/build.gradle @@ -20,6 +20,8 @@ dependencies { api project(path: ':modules:transport-netty4') testImplementation project(path: xpackModule('ilm')) + testImplementation project(path: xpackModule('downsample')) + testImplementation project(path: xpackModule('mapper-aggregate-metric')) testImplementation project(path: xpackModule('monitoring')) testImplementation project(path: xpackModule('spatial')) testImplementation project(path: xpackModule('wildcard')) diff --git a/x-pack/plugin/security/qa/audit/build.gradle b/x-pack/plugin/security/qa/audit/build.gradle new file mode 100644 index 0000000000000..f160e4ae3f1d5 --- /dev/null +++ b/x-pack/plugin/security/qa/audit/build.gradle @@ -0,0 +1,12 @@ +apply plugin: 'elasticsearch.internal-java-rest-test' + +dependencies { + javaRestTestImplementation project(':x-pack:plugin:core') + javaRestTestImplementation project(':x-pack:plugin:security') + clusterModules(project(":modules:analysis-common")) + clusterModules(project(":modules:rest-root")) +} + +tasks.named('javaRestTest') { + usesDefaultDistribution() +} diff --git a/x-pack/plugin/security/qa/audit/src/javaRestTest/java/org/elasticsearch/xpack/security/audit/AuditIT.java b/x-pack/plugin/security/qa/audit/src/javaRestTest/java/org/elasticsearch/xpack/security/audit/AuditIT.java new file mode 100644 index 0000000000000..e6af9c634e72f --- /dev/null +++ b/x-pack/plugin/security/qa/audit/src/javaRestTest/java/org/elasticsearch/xpack/security/audit/AuditIT.java @@ -0,0 +1,176 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.xpack.security.audit; + +import org.elasticsearch.client.Request; +import org.elasticsearch.client.RequestOptions; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.WarningsHandler; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.settings.SecureString; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.CheckedConsumer; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.LogType; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xpack.security.audit.logfile.LoggingAuditTrail; +import org.junit.ClassRule; + +import java.io.IOException; +import java.time.Instant; +import java.time.ZonedDateTime; +import java.time.format.DateTimeFormatter; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.function.Predicate; + +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.hasEntry; +import static org.hamcrest.Matchers.hasKey; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.not; + +public class AuditIT extends ESRestTestCase { + + private static final String API_USER = "api_user"; + private static final DateTimeFormatter TSTAMP_FORMATTER = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss,SSSZ"); + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .nodes(1) // A single node makes it easier to find audit events + .distribution(DistributionType.DEFAULT) + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.security.enabled", "true") + .setting("xpack.security.audit.enabled", "true") + .setting("xpack.security.audit.logfile.events.include", "[ \"_all\" ]") + .setting("xpack.security.audit.logfile.events.emit_request_body", "true") + .user("admin_user", "admin-password") + .user(API_USER, "api-password", "superuser", false) + .build(); + + @Override + protected Settings restAdminSettings() { + String token = basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + + @Override + protected Settings restClientSettings() { + String token = basicAuthHeaderValue(API_USER, new SecureString("api-password".toCharArray())); + return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); + } + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + + public void testAuditAuthenticationSuccess() throws Exception { + final Request request = new Request("GET", randomFrom("/_security/_authenticate", "/_xpack/security/_authenticate")); + executeAndVerifyAudit(request, AuditLevel.AUTHENTICATION_SUCCESS, event -> { + assertThat(event, hasEntry(LoggingAuditTrail.AUTHENTICATION_TYPE_FIELD_NAME, "REALM")); + }); + } + + public void testAuditAuthenticationFailure() throws Exception { + final Request request = 
new Request("GET", randomFrom("/_security/_authenticate", "/_xpack/security/_authenticate")); + String basicAuth = basicAuthHeaderValue(API_USER, new SecureString(new char[0])); + request.setOptions(request.getOptions().toBuilder().addHeader("Authorization", basicAuth).addParameter("ignore", "401")); + executeAndVerifyAudit(request, AuditLevel.AUTHENTICATION_FAILED, event -> {}); + } + + public void testFilteringOfRequestBodies() throws Exception { + final String username = randomAlphaOfLength(4) + randomIntBetween(100, 999); + final Request request = new Request(randomFrom("PUT", "POST"), randomFrom("/_security/user/", "/_xpack/security/user/") + username); + final String password = randomAlphaOfLength(4) + randomIntBetween(10, 99) + randomAlphaOfLength(4); + request.setJsonEntity("{ \"password\":\"" + password + "\", \"roles\":[\"superuser\"] }"); + executeAndVerifyAudit(request, AuditLevel.AUTHENTICATION_SUCCESS, event -> { + assertThat(event, hasEntry(LoggingAuditTrail.REQUEST_BODY_FIELD_NAME, "{\"roles\":[\"superuser\"]}")); + assertThat(toJson(event), not(containsString(password))); + }); + } + + private void executeAndVerifyAudit(Request request, AuditLevel eventType, CheckedConsumer, Exception> assertions) + throws Exception { + Instant start = Instant.now(); + executeRequest(request); + assertBusy(() -> { + try (var auditLog = cluster.getNodeLog(0, LogType.AUDIT)) { + final List lines = Streams.readAllLines(auditLog); + final List> events = findEvents(lines, eventType, e -> { + if (API_USER.equals(e.get(LoggingAuditTrail.PRINCIPAL_FIELD_NAME)) == false) { + return false; + } + Instant tstamp = ZonedDateTime.parse(String.valueOf(e.get(LoggingAuditTrail.TIMESTAMP)), TSTAMP_FORMATTER).toInstant(); + if (tstamp.isBefore(start)) { + return false; + } + return true; + }); + if (events.isEmpty()) { + fail("Could not find any [" + eventType + "] events for [" + API_USER + "] in [" + String.join("\n", lines) + "]"); + } + assertThat(events, hasSize(1)); + final Map event = events.get(0); + assertThat(event, hasEntry("type", "audit")); + assertThat(event, hasKey(LoggingAuditTrail.NODE_ID_FIELD_NAME)); + assertThat(event, hasKey(LoggingAuditTrail.REQUEST_ID_FIELD_NAME)); + assertThat(event, hasEntry(LoggingAuditTrail.EVENT_TYPE_FIELD_NAME, "rest")); + assertThat(event, hasEntry(LoggingAuditTrail.ORIGIN_TYPE_FIELD_NAME, "rest")); + assertThat(event, hasEntry(LoggingAuditTrail.URL_PATH_FIELD_NAME, request.getEndpoint())); + assertThat(event, hasEntry(LoggingAuditTrail.REQUEST_METHOD_FIELD_NAME, request.getMethod())); + assertions.accept(event); + + } + }, 5, TimeUnit.SECONDS); + } + + private static Response executeRequest(Request request) throws IOException { + if (request.getEndpoint().startsWith("/_xpack/security/")) { + final RequestOptions options = request.getOptions() + .toBuilder() + .addHeader("Content-Type", "application/json; compatible-with=7") + .addHeader("Accept", "application/json; compatible-with=7") + .setWarningsHandler(WarningsHandler.PERMISSIVE) + .build(); + request.setOptions(options); + } + return client().performRequest(request); + } + + private List> findEvents(List lines, AuditLevel level, Predicate> filter) { + final String eventType = level.name().toLowerCase(Locale.ROOT); + final List> events = new ArrayList<>(); + for (var line : lines) { + if (line.contains(eventType)) { + Map event = XContentHelper.convertToMap(XContentType.JSON.xContent(), line, true); + if (event.get(LoggingAuditTrail.EVENT_ACTION_FIELD_NAME).equals(eventType) && filter.test(event)) { + 
events.add(event); + } + } + } + return events; + } + + private static String toJson(Map map) throws IOException { + final XContentBuilder builder = XContentFactory.jsonBuilder().map(map); + final BytesReference bytes = BytesReference.bytes(builder); + return bytes.utf8ToString(); + } + +} diff --git a/x-pack/plugin/security/qa/jwt-realm/build.gradle b/x-pack/plugin/security/qa/jwt-realm/build.gradle index d561f0c8ceeed..bc7178f11d9fc 100644 --- a/x-pack/plugin/security/qa/jwt-realm/build.gradle +++ b/x-pack/plugin/security/qa/jwt-realm/build.gradle @@ -1,7 +1,7 @@ import org.elasticsearch.gradle.Version import org.elasticsearch.gradle.internal.info.BuildParams -apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' dependencies { javaRestTestImplementation project(path: xpackModule('core')) @@ -10,103 +10,6 @@ dependencies { javaRestTestImplementation project(":client:rest") } -def random = BuildParams.random -boolean explicitIdTokenType = random.nextBoolean() -def serviceSubject = 'service_' + random.nextInt(1, 9) + '@app' + random.nextInt(1, 9) + '.example.com' - -tasks.named("javaRestTest").configure { - systemProperty 'jwt2.service_subject', serviceSubject -} - -testClusters.matching { it.name == 'javaRestTest' }.configureEach { - testDistribution = 'DEFAULT' - - numberOfNodes = 2 - - // This can be turned on to help debug failing tests, but generates more output than is needed for CI - // setting 'logger.org.elasticsearch.xpack.security.authc.jwt', 'DEBUG' - - extraConfigFile 'http.key', file('src/javaRestTest/resources/ssl/http.key') - extraConfigFile 'http.crt', file('src/javaRestTest/resources/ssl/http.crt') - extraConfigFile 'ca.crt', file('src/javaRestTest/resources/ssl/ca.crt') - extraConfigFile 'rsa.jwkset', file('src/javaRestTest/resources/jwk/rsa-public-jwkset.json') - - setting 'xpack.ml.enabled', 'false' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.http.ssl.enabled', 'true' - setting 'xpack.security.transport.ssl.enabled', 'false' - setting 'xpack.security.authc.token.enabled', 'true' - setting 'xpack.security.authc.api_key.enabled', 'true' - - setting 'xpack.security.http.ssl.enabled', 'true' - setting 'xpack.security.http.ssl.certificate', 'http.crt' - setting 'xpack.security.http.ssl.key', 'http.key' - setting 'xpack.security.http.ssl.key_passphrase', 'http-password' - setting 'xpack.security.http.ssl.certificate_authorities', 'ca.crt' - setting 'xpack.security.http.ssl.client_authentication', 'optional' - - setting 'xpack.security.authc.realms.file.admin_file.order', '0' - - // These realm settings are generated by JwtRealmGenerateTests - setting 'xpack.security.authc.realms.jwt.jwt1.order', '1' - if (explicitIdTokenType) { - setting 'xpack.security.authc.realms.jwt.jwt1.token_type', 'id_token' - } - setting 'xpack.security.authc.realms.jwt.jwt1.allowed_issuer', 'https://issuer.example.com/' - setting 'xpack.security.authc.realms.jwt.jwt1.allowed_audiences', 'https://audience.example.com/' - setting 'xpack.security.authc.realms.jwt.jwt1.claims.principal', 'sub' - setting 'xpack.security.authc.realms.jwt.jwt1.claims.groups', 'roles' - setting 'xpack.security.authc.realms.jwt.jwt1.claims.dn', 'dn' - setting 'xpack.security.authc.realms.jwt.jwt1.claims.name', 'name' - setting 'xpack.security.authc.realms.jwt.jwt1.claims.mail', 'mail' - setting 'xpack.security.authc.realms.jwt.jwt1.required_claims.token_use', 'id' - setting 
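`AuditIT#executeRequest` above shows the REST API compatibility mechanism used to keep exercising the removed 7.x `/_xpack/security/` routes: both `Content-Type` and `Accept` carry `compatible-with=7`, and deprecation warnings are tolerated. A condensed sketch of just that request setup (the helper name is hypothetical; the headers and `WarningsHandler.PERMISSIVE` are taken directly from the test):

    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.RequestOptions;
    import org.elasticsearch.client.WarningsHandler;

    class CompatRequestSketch {
        static Request legacyRoute(String method, String endpoint) {
            Request request = new Request(method, endpoint);
            RequestOptions options = request.getOptions()
                .toBuilder()
                .addHeader("Content-Type", "application/json; compatible-with=7")
                .addHeader("Accept", "application/json; compatible-with=7")
                .setWarningsHandler(WarningsHandler.PERMISSIVE) // deprecation warnings are expected
                .build();
            request.setOptions(options);
            return request;
        }
    }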
'xpack.security.authc.realms.jwt.jwt1.required_claims.version', '2.0' - setting 'xpack.security.authc.realms.jwt.jwt1.client_authentication.type', 'NONE' - // Use default value (RS256) for signature algorithm - setting 'xpack.security.authc.realms.jwt.jwt1.pkc_jwkset_path', 'rsa.jwkset' - - // Place native realm after JWT realm to verify realm chain fall-through - setting 'xpack.security.authc.realms.native.lookup_native.order', '2' - - setting 'xpack.security.authc.realms.jwt.jwt2.order', '3' - setting 'xpack.security.authc.realms.jwt.jwt2.token_type', 'access_token' - setting 'xpack.security.authc.realms.jwt.jwt2.fallback_claims.sub', 'email' - setting 'xpack.security.authc.realms.jwt.jwt2.fallback_claims.aud', 'scope' - setting 'xpack.security.authc.realms.jwt.jwt2.allowed_issuer', 'my-issuer' - setting 'xpack.security.authc.realms.jwt.jwt2.allowed_subjects', serviceSubject - setting 'xpack.security.authc.realms.jwt.jwt2.allowed_audiences', 'es01,es02,es03' - setting 'xpack.security.authc.realms.jwt.jwt2.allowed_signature_algorithms', 'HS256,HS384' - // Both email or sub works because of fallback - if (random.nextBoolean()) { - setting 'xpack.security.authc.realms.jwt.jwt2.claims.principal', 'email' - } else { - setting 'xpack.security.authc.realms.jwt.jwt2.claims.principal', 'sub' - } - setting 'xpack.security.authc.realms.jwt.jwt2.claim_patterns.principal', '^(.*)@[^.]*[.]example[.]com$' - setting 'xpack.security.authc.realms.jwt.jwt2.required_claims.token_use', 'access' - setting 'xpack.security.authc.realms.jwt.jwt2.authorization_realms', 'lookup_native' - setting 'xpack.security.authc.realms.jwt.jwt2.client_authentication.type', 'shared_secret' - keystore 'xpack.security.authc.realms.jwt.jwt2.client_authentication.shared_secret', 'test-secret' - keystore 'xpack.security.authc.realms.jwt.jwt2.hmac_key', 'test-HMAC/secret passphrase-value' - - // Place PKI realm after JWT realm to verify realm chain fall-through - setting 'xpack.security.authc.realms.pki.pki_realm.order', '4' - - setting 'xpack.security.authc.realms.jwt.jwt3.order', '5' - if (explicitIdTokenType) { - setting 'xpack.security.authc.realms.jwt.jwt3.token_type', 'id_token' - } - setting 'xpack.security.authc.realms.jwt.jwt3.allowed_issuer', 'jwt3-issuer' - setting 'xpack.security.authc.realms.jwt.jwt3.allowed_audiences', '[jwt3-audience]' - setting 'xpack.security.authc.realms.jwt.jwt3.allowed_signature_algorithms', '[HS384, HS512]' - setting 'xpack.security.authc.realms.jwt.jwt3.claims.principal', 'sub' - setting 'xpack.security.authc.realms.jwt.jwt3.client_authentication.type', 'Shared_Secret' - keystore 'xpack.security.authc.realms.jwt.jwt3.hmac_jwkset', - '{"keys":[{"kty":"oct","kid":"test-hmac-384","k":"W3mR8v_MP0_YdDo1OB0uwOgPX6-7PzkICVxMDVCZlPGw3vyPr8SRb5akrRSNU-zV"},{"kty":"oct","kid":"test-hmac-512","k":"U4kMAa7tBwKOD4ggab4ZRGeHlFTILgNbescS1b5nambKJPmrB7QjeTryvfrE8zjYSvLxW2-tzFJUpk38a6FjPA"}]}' - keystore 'xpack.security.authc.realms.jwt.jwt3.client_authentication.shared_secret', 'test-secret' - - user username: "admin_user", password: "admin-password" - user username: "test_file_user", password: "test-password", role: "viewer" +tasks.named('javaRestTest') { + usesDefaultDistribution() } - diff --git a/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java b/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java index 79da6b75d90e3..a3d4c94e91882 100644 --- 
a/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java +++ b/x-pack/plugin/security/qa/jwt-realm/src/javaRestTest/java/org/elasticsearch/xpack/security/authc/jwt/JwtRestIT.java @@ -22,6 +22,7 @@ import com.nimbusds.jwt.SignedJWT; import org.apache.http.HttpHost; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.client.RequestOptions; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -34,6 +35,10 @@ import org.elasticsearch.core.Strings; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.TestSecurityClient; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.LocalClusterSpec; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentType; @@ -41,6 +46,7 @@ import org.elasticsearch.xpack.core.security.user.User; import org.hamcrest.Matchers; import org.junit.BeforeClass; +import org.junit.ClassRule; import java.io.FileNotFoundException; import java.io.IOException; @@ -68,10 +74,111 @@ public class JwtRestIT extends ESRestTestCase { - private static final Optional VALID_SHARED_SECRET = Optional.of("test-secret"); + private static final String HMAC_JWKSET = """ + {"keys":[ + {"kty":"oct","kid":"test-hmac-384","k":"W3mR8v_MP0_YdDo1OB0uwOgPX6-7PzkICVxMDVCZlPGw3vyPr8SRb5akrRSNU-zV"}, + {"kty":"oct","kid":"test-hmac-512","k":"U4kMAa7tBwKOD4ggab4ZRGeHlFTILgNbescS1b5nambKJPmrB7QjeTryvfrE8zjYSvLxW2-tzFJUpk38a6FjPA"} + ]}""".replaceAll("\\s", ""); + public static final String HMAC_PASSPHRASE = "test-HMAC/secret passphrase-value"; + private static final String VALID_SHARED_SECRET = "test-secret"; + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .nodes(2) + .distribution(DistributionType.DEFAULT) + .configFile("http.key", Resource.fromClasspath("ssl/http.key")) + .configFile("http.crt", Resource.fromClasspath("ssl/http.crt")) + .configFile("ca.crt", Resource.fromClasspath("ssl/ca.crt")) + .configFile("rsa.jwkset", Resource.fromClasspath("jwk/rsa-public-jwkset.json")) + .setting("xpack.ml.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.security.enabled", "true") + .setting("xpack.security.http.ssl.enabled", "true") + .setting("xpack.security.transport.ssl.enabled", "false") + .setting("xpack.security.authc.token.enabled", "true") + .setting("xpack.security.authc.api_key.enabled", "true") + + .setting("xpack.security.http.ssl.enabled", "true") + .setting("xpack.security.http.ssl.certificate", "http.crt") + .setting("xpack.security.http.ssl.key", "http.key") + .setting("xpack.security.http.ssl.key_passphrase", "http-password") + .setting("xpack.security.http.ssl.certificate_authorities", "ca.crt") + .setting("xpack.security.http.ssl.client_authentication", "optional") + .settings(JwtRestIT::realmSettings) + .keystore("xpack.security.authc.realms.jwt.jwt2.client_authentication.shared_secret", VALID_SHARED_SECRET) + .keystore("xpack.security.authc.realms.jwt.jwt2.hmac_key", HMAC_PASSPHRASE) + .keystore("xpack.security.authc.realms.jwt.jwt3.hmac_jwkset", HMAC_JWKSET) + .keystore("xpack.security.authc.realms.jwt.jwt3.client_authentication.shared_secret", VALID_SHARED_SECRET) + .user("admin_user", 
"admin-password") + .user("test_file_user", "test-password", "viewer", false) + .build(); + + private static final SetOnce SERVICE_SUBJECT = new SetOnce<>(); private static Path httpCertificateAuthority; private TestSecurityClient adminSecurityClient; + private static Map realmSettings(LocalClusterSpec.LocalNodeSpec localNodeSpec) { + final boolean explicitIdTokenType = randomBoolean(); + SERVICE_SUBJECT.trySet("service_" + randomIntBetween(1, 9) + "@app" + randomIntBetween(1, 9) + ".example.com"); + + final Map settings = new HashMap<>(); + settings.put("xpack.security.authc.realms.file.admin_file.order", "0"); + + settings.put("xpack.security.authc.realms.jwt.jwt1.order", "1"); + if (explicitIdTokenType) { + settings.put("xpack.security.authc.realms.jwt.jwt1.token_type", "id_token"); + } + settings.put("xpack.security.authc.realms.jwt.jwt1.allowed_issuer", "https://issuer.example.com/"); + settings.put("xpack.security.authc.realms.jwt.jwt1.allowed_audiences", "https://audience.example.com/"); + settings.put("xpack.security.authc.realms.jwt.jwt1.claims.principal", "sub"); + settings.put("xpack.security.authc.realms.jwt.jwt1.claims.groups", "roles"); + settings.put("xpack.security.authc.realms.jwt.jwt1.claims.dn", "dn"); + settings.put("xpack.security.authc.realms.jwt.jwt1.claims.name", "name"); + settings.put("xpack.security.authc.realms.jwt.jwt1.claims.mail", "mail"); + settings.put("xpack.security.authc.realms.jwt.jwt1.required_claims.token_use", "id"); + settings.put("xpack.security.authc.realms.jwt.jwt1.required_claims.version", "2.0"); + settings.put("xpack.security.authc.realms.jwt.jwt1.client_authentication.type", "NONE"); + // Use default value (RS256) for signature algorithm + settings.put("xpack.security.authc.realms.jwt.jwt1.pkc_jwkset_path", "rsa.jwkset"); + + // Place native realm after JWT realm to verify realm chain fall-through + settings.put("xpack.security.authc.realms.native.lookup_native.order", "2"); + + settings.put("xpack.security.authc.realms.jwt.jwt2.order", "3"); + settings.put("xpack.security.authc.realms.jwt.jwt2.token_type", "access_token"); + settings.put("xpack.security.authc.realms.jwt.jwt2.fallback_claims.sub", "email"); + settings.put("xpack.security.authc.realms.jwt.jwt2.fallback_claims.aud", "scope"); + settings.put("xpack.security.authc.realms.jwt.jwt2.allowed_issuer", "my-issuer"); + settings.put("xpack.security.authc.realms.jwt.jwt2.allowed_subjects", SERVICE_SUBJECT.get()); + settings.put("xpack.security.authc.realms.jwt.jwt2.allowed_audiences", "es01,es02,es03"); + settings.put("xpack.security.authc.realms.jwt.jwt2.allowed_signature_algorithms", "HS256,HS384"); + // Both email or sub works because of fallback + if (randomBoolean()) { + settings.put("xpack.security.authc.realms.jwt.jwt2.claims.principal", "email"); + } else { + settings.put("xpack.security.authc.realms.jwt.jwt2.claims.principal", "sub"); + } + settings.put("xpack.security.authc.realms.jwt.jwt2.claim_patterns.principal", "^(.*)@[^.]*[.]example[.]com$"); + settings.put("xpack.security.authc.realms.jwt.jwt2.required_claims.token_use", "access"); + settings.put("xpack.security.authc.realms.jwt.jwt2.authorization_realms", "lookup_native"); + settings.put("xpack.security.authc.realms.jwt.jwt2.client_authentication.type", "shared_secret"); + + // Place PKI realm after JWT realm to verify realm chain fall-through + settings.put("xpack.security.authc.realms.pki.pki_realm.order", "4"); + + settings.put("xpack.security.authc.realms.jwt.jwt3.order", "5"); + if (explicitIdTokenType) { + 
settings.put("xpack.security.authc.realms.jwt.jwt3.token_type", "id_token"); + } + settings.put("xpack.security.authc.realms.jwt.jwt3.allowed_issuer", "jwt3-issuer"); + settings.put("xpack.security.authc.realms.jwt.jwt3.allowed_audiences", "[jwt3-audience]"); + settings.put("xpack.security.authc.realms.jwt.jwt3.allowed_signature_algorithms", "[HS384, HS512]"); + settings.put("xpack.security.authc.realms.jwt.jwt3.claims.principal", "sub"); + settings.put("xpack.security.authc.realms.jwt.jwt3.client_authentication.type", "Shared_Secret"); + + return settings; + } + @BeforeClass public static void findTrustStore() throws Exception { JwtRestIT.httpCertificateAuthority = findResource("/ssl/ca.crt"); @@ -86,9 +193,13 @@ private static Path findResource(String name) throws FileNotFoundException, URIS return path; } + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + @Override protected String getProtocol() { - // Because this QA project uses https return "https"; } @@ -282,7 +393,7 @@ public void testFailureOnNonMatchingRsaSignature() throws Exception { * - uses a shared-secret for client authentication */ public void testAuthenticateWithHmacSignedJWTAndDelegatedAuthorization() throws Exception { - final String principal = System.getProperty("jwt2.service_subject"); + final String principal = SERVICE_SUBJECT.get(); final String username = getUsernameFromPrincipal(principal); final List roles = randomRoles(); final String randomMetadata = randomAlphaOfLengthBetween(6, 18); @@ -290,7 +401,7 @@ public void testAuthenticateWithHmacSignedJWTAndDelegatedAuthorization() throws try { final SignedJWT jwt = buildAndSignJwtForRealm2(principal); - final TestSecurityClient client = getSecurityClient(jwt, VALID_SHARED_SECRET); + final TestSecurityClient client = getSecurityClient(jwt, Optional.of(VALID_SHARED_SECRET)); final Map response = client.authenticate(); @@ -311,7 +422,7 @@ public void testAuthenticateWithHmacSignedJWTAndDelegatedAuthorization() throws } public void testFailureOnInvalidHMACSignature() throws Exception { - final String principal = System.getProperty("jwt2.service_subject"); + final String principal = SERVICE_SUBJECT.get(); final String username = getUsernameFromPrincipal(principal); final List roles = randomRoles(); createUser(username, roles, Map.of()); @@ -321,14 +432,14 @@ public void testFailureOnInvalidHMACSignature() throws Exception { { // This is the correct HMAC passphrase (from build.gradle) - final SignedJWT jwt = signHmacJwt(claimsSet, "test-HMAC/secret passphrase-value"); - final TestSecurityClient client = getSecurityClient(jwt, VALID_SHARED_SECRET); + final SignedJWT jwt = signHmacJwt(claimsSet, HMAC_PASSPHRASE); + final TestSecurityClient client = getSecurityClient(jwt, Optional.of(VALID_SHARED_SECRET)); assertThat(client.authenticate(), hasEntry(User.Fields.USERNAME.getPreferredName(), username)); } { // This is not the correct HMAC passphrase final SignedJWT invalidJwt = signHmacJwt(claimsSet, "invalid-HMAC-passphrase-" + randomAlphaOfLength(12)); - final TestSecurityClient client = getSecurityClient(invalidJwt, VALID_SHARED_SECRET); + final TestSecurityClient client = getSecurityClient(invalidJwt, Optional.of(VALID_SHARED_SECRET)); // This fails because the HMAC is wrong final ResponseException exception = expectThrows(ResponseException.class, client::authenticate); assertThat(exception.getResponse(), hasStatusCode(RestStatus.UNAUTHORIZED)); @@ -340,7 +451,7 @@ public void testFailureOnInvalidHMACSignature() throws 
Exception { } public void testFailureOnRequiredClaims() throws JOSEException, IOException { - final String principal = System.getProperty("jwt2.service_subject"); + final String principal = SERVICE_SUBJECT.get(); final String username = getUsernameFromPrincipal(principal); final List roles = randomRoles(); createUser(username, roles, Map.of()); @@ -353,7 +464,7 @@ public void testFailureOnRequiredClaims() throws JOSEException, IOException { } final JWTClaimsSet claimsSet = buildJwt(data, Instant.now(), false, false); final SignedJWT jwt = signHmacJwt(claimsSet, "test-HMAC/secret passphrase-value"); - final TestSecurityClient client = getSecurityClient(jwt, VALID_SHARED_SECRET); + final TestSecurityClient client = getSecurityClient(jwt, Optional.of(VALID_SHARED_SECRET)); final ResponseException exception = expectThrows(ResponseException.class, client::authenticate); assertThat(exception.getResponse(), hasStatusCode(RestStatus.UNAUTHORIZED)); } finally { @@ -362,10 +473,10 @@ public void testFailureOnRequiredClaims() throws JOSEException, IOException { } public void testAuthenticationFailureIfDelegatedAuthorizationFails() throws Exception { - final String principal = System.getProperty("jwt2.service_subject"); + final String principal = SERVICE_SUBJECT.get(); final String username = getUsernameFromPrincipal(principal); final SignedJWT jwt = buildAndSignJwtForRealm2(principal); - final TestSecurityClient client = getSecurityClient(jwt, VALID_SHARED_SECRET); + final TestSecurityClient client = getSecurityClient(jwt, Optional.of(VALID_SHARED_SECRET)); // This fails because we didn't create a native user final ResponseException exception = expectThrows(ResponseException.class, client::authenticate); @@ -381,7 +492,7 @@ public void testAuthenticationFailureIfDelegatedAuthorizationFails() throws Exce } public void testFailureOnInvalidClientAuthentication() throws Exception { - final String principal = System.getProperty("jwt2.service_subject"); + final String principal = SERVICE_SUBJECT.get(); final String username = getUsernameFromPrincipal(principal); final List roles = randomRoles(); createUser(username, roles, Map.of()); @@ -410,7 +521,7 @@ public void testFailureOnInvalidClientAuthentication() throws Exception { public void testAuthenticateWithHmacSignedJWTAndMissingRoleMapping() throws Exception { final String principal = randomPrincipal(); final SignedJWT jwt = buildAndSignJwtForRealm3(principal); - final TestSecurityClient client = getSecurityClient(jwt, VALID_SHARED_SECRET); + final TestSecurityClient client = getSecurityClient(jwt, Optional.of(VALID_SHARED_SECRET)); final Map response = client.authenticate(); diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java index f7b8615413db4..1de322fef27de 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityApiKeyRestIT.java @@ -328,7 +328,7 @@ public void testCrossClusterSearchWithApiKey() throws Exception { () -> performRequestWithApiKey(new Request("GET", "/invalid_remote:index1/_search"), apiKeyEncoded) ); assertThat(exception4.getResponse().getStatusLine().getStatusCode(), 
equalTo(401)); - assertThat(exception4.getMessage(), containsString("unable to authenticate user ")); + assertThat(exception4.getMessage(), containsString("unable to find apikey")); } } diff --git a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestIT.java b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestIT.java index 6f1993d9b31e1..40bbc82afd211 100644 --- a/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestIT.java +++ b/x-pack/plugin/security/qa/multi-cluster/src/javaRestTest/java/org/elasticsearch/xpack/remotecluster/RemoteClusterSecurityRestIT.java @@ -37,7 +37,6 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; -import static org.hamcrest.Matchers.allOf; import static org.hamcrest.Matchers.anEmptyMap; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.containsString; @@ -339,10 +338,7 @@ public void testCrossClusterSearch() throws Exception { () -> performRequestWithRemoteSearchUser(new Request("GET", "/invalid_remote:index1/_search")) ); assertThat(exception4.getResponse().getStatusLine().getStatusCode(), equalTo(401)); - assertThat( - exception4.getMessage(), - allOf(containsString("unable to authenticate user "), containsString("unable to find apikey")) - ); + assertThat(exception4.getMessage(), containsString("unable to find apikey")); // check that REST API key is not supported by cross cluster access updateClusterSettings( diff --git a/x-pack/plugin/security/qa/profile/src/javaRestTest/java/org/elasticsearch/xpack/security/profile/ProfileIT.java b/x-pack/plugin/security/qa/profile/src/javaRestTest/java/org/elasticsearch/xpack/security/profile/ProfileIT.java index fc25bda6d89b8..f2f753e9bb3ab 100644 --- a/x-pack/plugin/security/qa/profile/src/javaRestTest/java/org/elasticsearch/xpack/security/profile/ProfileIT.java +++ b/x-pack/plugin/security/qa/profile/src/javaRestTest/java/org/elasticsearch/xpack/security/profile/ProfileIT.java @@ -443,14 +443,18 @@ public void testGetUsersWithProfileUid() throws IOException { final Request putUserRequest = new Request("PUT", "_security/user/" + username); putUserRequest.setJsonEntity("{\"password\":\"x-pack-test-password\",\"roles\":[\"superuser\"]}"); assertOK(adminClient().performRequest(putUserRequest)); - final Map profile = doActivateProfile(username, "x-pack-test-password"); + // Get user with profile uid before profile index exists will not show any profile_uid final Request getUserRequest = new Request("GET", "_security/user" + (randomBoolean() ? 
"/" + username : "")); getUserRequest.addParameter("with_profile_uid", "true"); - final Response getUserResponse = adminClient().performRequest(getUserRequest); - assertOK(getUserResponse); + final Response getUserResponse1 = adminClient().performRequest(getUserRequest); + assertOK(getUserResponse1); + responseAsMap(getUserResponse1).forEach((k, v) -> assertThat(castToMap(v), not(hasKey("profile_uid")))); - responseAsMap(getUserResponse).forEach((k, v) -> { + // The profile_uid is retrieved for the user after the profile gets activated + final Map profile = doActivateProfile(username, "x-pack-test-password"); + final Response getUserResponse2 = adminClient().performRequest(getUserRequest); + responseAsMap(getUserResponse2).forEach((k, v) -> { if (username.equals(k)) { assertThat(castToMap(v).get("profile_uid"), equalTo(profile.get("uid"))); } else { diff --git a/x-pack/plugin/security/qa/security-basic/build.gradle b/x-pack/plugin/security/qa/security-basic/build.gradle index 10a8081789652..f1fa2dabfb1f6 100644 --- a/x-pack/plugin/security/qa/security-basic/build.gradle +++ b/x-pack/plugin/security/qa/security-basic/build.gradle @@ -1,5 +1,5 @@ -apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' import org.elasticsearch.gradle.internal.info.BuildParams @@ -9,27 +9,12 @@ dependencies { javaRestTestImplementation project(":client:rest-high-level") } -if (BuildParams.inFipsJvm){ - // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC - tasks.named("javaRestTest").configure{enabled = false } +tasks.named('javaRestTest') { + usesDefaultDistribution() } -testClusters.configureEach { - testDistribution = 'DEFAULT' - numberOfNodes = 2 - - setting 'xpack.ml.enabled', 'false' - setting 'xpack.license.self_generated.type', 'basic' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.ssl.diagnose.trust', 'true' - setting 'xpack.security.http.ssl.enabled', 'false' - setting 'xpack.security.transport.ssl.enabled', 'false' - setting 'xpack.security.authc.token.enabled', 'true' - setting 'xpack.security.authc.api_key.enabled', 'true' - rolesFile file('src/javaRestTest/resources/roles.yml') - user username: "admin_user", password: "admin-password" - user username: "security_test_user", password: "security-test-password", role: "security_test_role" - user username: "api_key_admin", password: "security-test-password", role: "api_key_admin_role" - user username: "api_key_user", password: "security-test-password", role: "api_key_user_role" +if (BuildParams.inFipsJvm){ + // This test cluster is using a BASIC license and FIPS 140 mode is not supported in BASIC + tasks.named("javaRestTest").configure{enabled = false } } diff --git a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityInBasicRestTestCase.java b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityInBasicRestTestCase.java index 05a8d777dca0a..5843350e36457 100644 --- a/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityInBasicRestTestCase.java +++ b/x-pack/plugin/security/qa/security-basic/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityInBasicRestTestCase.java @@ -10,19 +10,59 @@ import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; +import 
org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.local.model.User; +import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ESRestTestCase; +import org.junit.ClassRule; public abstract class SecurityInBasicRestTestCase extends ESRestTestCase { + protected static final String REST_USER = "security_test_user"; + private static final SecureString REST_PASSWORD = new SecureString("security-test-password".toCharArray()); + + private static final String ADMIN_USER = "admin_user"; + private static final SecureString ADMIN_PASSWORD = new SecureString("admin-password".toCharArray()); + + protected static final String API_KEY_USER = "api_key_user"; + private static final SecureString API_KEY_USER_PASSWORD = new SecureString("security-test-password".toCharArray()); + + protected static final String API_KEY_ADMIN_USER = "api_key_admin"; + private static final SecureString API_KEY_ADMIN_USER_PASSWORD = new SecureString("security-test-password".toCharArray()); + + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .nodes(2) + .distribution(DistributionType.DEFAULT) + .setting("xpack.license.self_generated.type", "basic") + .setting("xpack.security.enabled", "true") + .setting("xpack.security.ssl.diagnose.trust", "true") + .setting("xpack.security.http.ssl.enabled", "false") + .setting("xpack.security.transport.ssl.enabled", "false") + .setting("xpack.security.authc.token.enabled", "true") + .setting("xpack.security.authc.api_key.enabled", "true") + .rolesFile(Resource.fromClasspath("roles.yml")) + .user(ADMIN_USER, ADMIN_PASSWORD.toString(), User.ROOT_USER_ROLE, true) + .user(REST_USER, REST_PASSWORD.toString(), "security_test_role", false) + .user(API_KEY_USER, API_KEY_USER_PASSWORD.toString(), "api_key_user_role", false) + .user(API_KEY_ADMIN_USER, API_KEY_ADMIN_USER_PASSWORD.toString(), "api_key_admin_role", false) + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + @Override protected Settings restAdminSettings() { - String token = basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray())); + String token = basicAuthHeaderValue(ADMIN_USER, ADMIN_PASSWORD); return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); } @Override protected Settings restClientSettings() { - String token = basicAuthHeaderValue("security_test_user", new SecureString("security-test-password".toCharArray())); + String token = basicAuthHeaderValue(REST_USER, REST_PASSWORD); return Settings.builder().put(ThreadContext.PREFIX + ".Authorization", token).build(); } diff --git a/x-pack/plugin/security/qa/security-trial/build.gradle b/x-pack/plugin/security/qa/security-trial/build.gradle index aca672e6ecd93..991e1623f2f35 100644 --- a/x-pack/plugin/security/qa/security-trial/build.gradle +++ b/x-pack/plugin/security/qa/security-trial/build.gradle @@ -1,6 +1,6 @@ import org.elasticsearch.gradle.Version -apply plugin: 'elasticsearch.legacy-java-rest-test' +apply plugin: 'elasticsearch.internal-java-rest-test' dependencies { javaRestTestImplementation project(path: xpackModule('core')) @@ -9,28 +9,7 @@ dependencies { javaRestTestImplementation project(":client:rest-high-level") } -testClusters.matching { it.name == 'javaRestTest' }.configureEach { - testDistribution = 'DEFAULT' - numberOfNodes = 2 - - setting 'xpack.ml.enabled', 
'false' - setting 'xpack.license.self_generated.type', 'trial' - setting 'xpack.security.enabled', 'true' - setting 'xpack.security.ssl.diagnose.trust', 'true' - setting 'xpack.security.http.ssl.enabled', 'false' - setting 'xpack.security.transport.ssl.enabled', 'false' - setting 'xpack.security.authc.token.enabled', 'true' - setting 'xpack.security.authc.api_key.enabled', 'true' - setting 'xpack.security.remote_cluster_client.ssl.enabled', 'false' - - keystore 'cluster.remote.my_remote_cluster_a.credentials', 'cluster_a_credentials' - keystore 'cluster.remote.my_remote_cluster_b.credentials', 'cluster_b_credentials' - keystore 'cluster.remote.my_remote_cluster_a_1.credentials', 'cluster_a_credentials' - keystore 'cluster.remote.my_remote_cluster_a_2.credentials', 'cluster_a_credentials' - - rolesFile file('src/javaRestTest/resources/roles.yml') - user username: "admin_user", password: "admin-password" - user username: "security_test_user", password: "security-test-password", role: "security_test_role" - user username: "x_pack_rest_user", password: "x-pack-test-password" - user username: "cat_test_user", password: "cat-test-password", role: "cat_test_role" +tasks.named('javaRestTest') { + usesDefaultDistribution() } + diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityOnTrialLicenseRestTestCase.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityOnTrialLicenseRestTestCase.java index be222046f2ce5..3ad250c4e6037 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityOnTrialLicenseRestTestCase.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/security/SecurityOnTrialLicenseRestTestCase.java @@ -12,19 +12,54 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.Tuple; import org.elasticsearch.test.TestSecurityClient; +import org.elasticsearch.test.cluster.ElasticsearchCluster; +import org.elasticsearch.test.cluster.local.distribution.DistributionType; +import org.elasticsearch.test.cluster.util.resource.Resource; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.core.security.action.apikey.ApiKey; import org.elasticsearch.xpack.core.security.authc.support.UsernamePasswordToken; import org.elasticsearch.xpack.core.security.authz.RoleDescriptor; import org.elasticsearch.xpack.core.security.user.User; +import org.junit.ClassRule; import java.io.IOException; import java.util.Collection; import java.util.List; +import static org.elasticsearch.test.cluster.local.model.User.ROOT_USER_ROLE; + public abstract class SecurityOnTrialLicenseRestTestCase extends ESRestTestCase { private TestSecurityClient securityClient; + @ClassRule + public static ElasticsearchCluster cluster = ElasticsearchCluster.local() + .nodes(2) + .distribution(DistributionType.DEFAULT) + .setting("xpack.ml.enabled", "false") + .setting("xpack.license.self_generated.type", "trial") + .setting("xpack.security.enabled", "true") + .setting("xpack.security.ssl.diagnose.trust", "true") + .setting("xpack.security.http.ssl.enabled", "false") + .setting("xpack.security.transport.ssl.enabled", "false") + .setting("xpack.security.authc.token.enabled", "true") + .setting("xpack.security.authc.api_key.enabled", "true") + .setting("xpack.security.remote_cluster_client.ssl.enabled", "false") + 
.keystore("cluster.remote.my_remote_cluster_a.credentials", "cluster_a_credentials") + .keystore("cluster.remote.my_remote_cluster_b.credentials", "cluster_b_credentials") + .keystore("cluster.remote.my_remote_cluster_a_1.credentials", "cluster_a_credentials") + .keystore("cluster.remote.my_remote_cluster_a_2.credentials", "cluster_a_credentials") + .rolesFile(Resource.fromClasspath("roles.yml")) + .user("admin_user", "admin-password", ROOT_USER_ROLE, true) + .user("security_test_user", "security-test-password", "security_test_role", false) + .user("x_pack_rest_user", "x-pack-test-password", ROOT_USER_ROLE, true) + .user("cat_test_user", "cat-test-password", "cat_test_role", false) + .build(); + + @Override + protected String getTestRestCluster() { + return cluster.getHttpAddresses(); + } + @Override protected Settings restAdminSettings() { String token = basicAuthHeaderValue("admin_user", new SecureString("admin-password".toCharArray())); diff --git a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/test/rest/CatIndicesWithSecurityIT.java b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/test/rest/CatIndicesWithSecurityIT.java index 4b08f046d69c7..38cda5bbed0f2 100644 --- a/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/test/rest/CatIndicesWithSecurityIT.java +++ b/x-pack/plugin/security/qa/security-trial/src/javaRestTest/java/org/elasticsearch/xpack/test/rest/CatIndicesWithSecurityIT.java @@ -14,13 +14,14 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.test.SecuritySettingsSourceField; -import org.elasticsearch.test.rest.ESRestTestCase; +import org.elasticsearch.xpack.security.SecurityOnTrialLicenseRestTestCase; import java.io.IOException; import static org.hamcrest.Matchers.matchesRegex; -public class CatIndicesWithSecurityIT extends ESRestTestCase { +public class CatIndicesWithSecurityIT extends SecurityOnTrialLicenseRestTestCase { + @Override protected Settings restAdminSettings() { String token = basicAuthHeaderValue("x_pack_rest_user", SecuritySettingsSourceField.TEST_PASSWORD_SECURE_STRING); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java new file mode 100644 index 0000000000000..cb3a032f44127 --- /dev/null +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/integration/DataStreamLifecycleDownsamplingSecurityIT.java @@ -0,0 +1,495 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. 
+ */ + +package org.elasticsearch.integration; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.DocWriteRequest; +import org.elasticsearch.action.admin.indices.rollover.RolloverAction; +import org.elasticsearch.action.admin.indices.rollover.RolloverRequest; +import org.elasticsearch.action.admin.indices.template.put.PutComposableIndexTemplateAction; +import org.elasticsearch.action.bulk.BulkItemResponse; +import org.elasticsearch.action.bulk.BulkRequestBuilder; +import org.elasticsearch.action.bulk.BulkResponse; +import org.elasticsearch.action.datastreams.GetDataStreamAction; +import org.elasticsearch.action.downsample.DownsampleConfig; +import org.elasticsearch.action.index.IndexRequest; +import org.elasticsearch.action.support.WriteRequest; +import org.elasticsearch.client.internal.Client; +import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; +import org.elasticsearch.cluster.metadata.DataStreamLifecycle; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Template; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.compress.CompressedXContent; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.time.DateFormatter; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.datastreams.DataStreamsPlugin; +import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleErrorStore; +import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService; +import org.elasticsearch.datastreams.lifecycle.action.PutDataStreamLifecycleAction; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexMode; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.VersionConflictEngineException; +import org.elasticsearch.index.mapper.extras.MapperExtrasPlugin; +import org.elasticsearch.indices.ExecutorNames; +import org.elasticsearch.indices.SystemDataStreamDescriptor; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.SystemIndexPlugin; +import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; +import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xpack.aggregatemetric.AggregateMetricMapperPlugin; +import org.elasticsearch.xpack.downsample.Downsample; +import org.elasticsearch.xpack.security.LocalStateSecurity; +import org.elasticsearch.xpack.wildcard.Wildcard; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + +import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo; +import static org.elasticsearch.xcontent.XContentFactory.jsonBuilder; +import static org.hamcrest.Matchers.anyOf; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.not; + +/** + * 
This test suite ensures that data stream lifecycle runtime tasks work correctly with security enabled, i.e., that the internal user for + * data stream lifecycle has all requisite privileges to orchestrate the data stream lifecycle. + * This class focuses on the downsampling execution. + */ +public class DataStreamLifecycleDownsamplingSecurityIT extends SecurityIntegTestCase { + private static final Logger logger = LogManager.getLogger(DataStreamLifecycleDownsamplingSecurityIT.class); + + private static final DateFormatter DATE_FORMATTER = DateFormatter.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSXXX"); + public static final String FIELD_TIMESTAMP = "@timestamp"; + public static final String FIELD_DIMENSION_1 = "dimension_kw"; + public static final String FIELD_DIMENSION_2 = "dimension_long"; + public static final String FIELD_METRIC_COUNTER = "counter"; + + @Override + protected Collection<Class<? extends Plugin>> nodePlugins() { + return List.of( + LocalStateSecurity.class, + DataStreamsPlugin.class, + SystemDataStreamTestPlugin.class, + MapperExtrasPlugin.class, + Wildcard.class, + Downsample.class, + AggregateMetricMapperPlugin.class + ); + } + + @Override + protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { + Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)); + settings.put(DataStreamLifecycleService.DATA_STREAM_LIFECYCLE_POLL_INTERVAL, "1s"); + return settings.build(); + } + + @TestLogging(value = "org.elasticsearch.datastreams.lifecycle:TRACE", reason = "debugging") + public void testDownsamplingAuthorized() throws Exception { + String dataStreamName = "metrics-foo"; + + DataStreamLifecycle lifecycle = DataStreamLifecycle.newBuilder() + .downsampling( + new DataStreamLifecycle.Downsampling( + List.of( + new DataStreamLifecycle.Downsampling.Round( + TimeValue.timeValueMillis(0), + new DownsampleConfig(new DateHistogramInterval("1s")) + ), + new DataStreamLifecycle.Downsampling.Round( + TimeValue.timeValueSeconds(10), + new DownsampleConfig(new DateHistogramInterval("10s")) + ) + ) + ) + ) + .build(); + + setupDataStreamAndIngestDocs(client(), dataStreamName, lifecycle, 10_000); + waitAndAssertDownsamplingCompleted(dataStreamName); + } +
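The two downsampling rounds configured above are the heart of this test: the first round downsamples to a 1-second interval as soon as an index becomes eligible (an `after` of 0 ms), and the second re-downsamples to 10 seconds once the index is 10 seconds old. As a plain-Java sketch of how ordered rounds resolve against an index's age (the `Round` record and `pick()` helper below are hypothetical stand-ins, not Elasticsearch types):

```java
import java.time.Duration;
import java.util.List;
import java.util.Optional;

// Hypothetical stand-in for DataStreamLifecycle.Downsampling.Round: a round
// applies once the index is older than "after" and targets "fixedInterval" buckets.
record Round(Duration after, Duration fixedInterval) {}

class RoundPicker {
    // The last round whose "after" threshold has passed wins, which is why the
    // test expects only the 10s downsample index to survive in the data stream.
    static Optional<Round> pick(List<Round> rounds, Duration indexAge) {
        Round winner = null;
        for (Round round : rounds) { // rounds are ordered by ascending "after"
            if (indexAge.compareTo(round.after()) >= 0) {
                winner = round;
            }
        }
        return Optional.ofNullable(winner);
    }

    public static void main(String[] args) {
        List<Round> rounds = List.of(
            new Round(Duration.ZERO, Duration.ofSeconds(1)),
            new Round(Duration.ofSeconds(10), Duration.ofSeconds(10))
        );
        System.out.println(pick(rounds, Duration.ofSeconds(3)));  // the 1s round
        System.out.println(pick(rounds, Duration.ofSeconds(42))); // the 10s round
    }
}
```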
+ public void testConfiguringLifecycleWithDownsamplingForSystemDataStreamFails() { + String dataStreamName = SystemDataStreamTestPlugin.SYSTEM_DATA_STREAM_NAME; + indexDocuments(client(), dataStreamName, 100); + DataStreamLifecycle lifecycle = DataStreamLifecycle.newBuilder() + .downsampling( + new DataStreamLifecycle.Downsampling( + List.of( + new DataStreamLifecycle.Downsampling.Round( + TimeValue.timeValueMillis(0), + new DownsampleConfig(new DateHistogramInterval("1s")) + ), + new DataStreamLifecycle.Downsampling.Round( + TimeValue.timeValueSeconds(10), + new DownsampleConfig(new DateHistogramInterval("10s")) + ) + ) + ) + ) + .build(); + IllegalArgumentException illegalArgumentException = expectThrows( + IllegalArgumentException.class, + () -> client().execute( + PutDataStreamLifecycleAction.INSTANCE, + new PutDataStreamLifecycleAction.Request(new String[] { dataStreamName }, lifecycle) + ).actionGet() + ); + assertThat( + illegalArgumentException.getMessage(), + is( + "System data streams do not support downsampling as part of their lifecycle " + + "configuration. Encountered [" + + dataStreamName + + "] in the request" + ) + ); + } + + public void testExplicitSystemDataStreamConfigurationWithDownsamplingFails() { + SystemDataStreamWithDownsamplingConfigurationPlugin pluginWithIllegalSystemDataStream = + new SystemDataStreamWithDownsamplingConfigurationPlugin(); + IllegalArgumentException illegalArgumentException = expectThrows( + IllegalArgumentException.class, + () -> pluginWithIllegalSystemDataStream.getSystemDataStreamDescriptors() + ); + assertThat( + illegalArgumentException.getMessage(), + is("System data streams do not support downsampling as part of their lifecycle configuration") + ); + } + + private void waitAndAssertDownsamplingCompleted(String dataStreamName) throws Exception { + List<Index> backingIndices = getDataStreamBackingIndices(dataStreamName); + String firstGenerationBackingIndex = backingIndices.get(0).getName(); + String oneSecondDownsampleIndex = "downsample-1s-" + firstGenerationBackingIndex; + String tenSecondsDownsampleIndex = "downsample-10s-" + firstGenerationBackingIndex; + + Set<String> witnessedDownsamplingIndices = new HashSet<>(); + clusterService().addListener(event -> { + if (event.indicesCreated().contains(oneSecondDownsampleIndex) + || event.indicesDeleted().stream().anyMatch(index -> index.getName().equals(oneSecondDownsampleIndex))) { + witnessedDownsamplingIndices.add(oneSecondDownsampleIndex); + } + if (event.indicesCreated().contains(tenSecondsDownsampleIndex)) { + witnessedDownsamplingIndices.add(tenSecondsDownsampleIndex); + } + }); + + client().execute(RolloverAction.INSTANCE, new RolloverRequest(dataStreamName, null)).actionGet(); + + assertBusy(() -> { + assertNoAuthzErrors(); + // first downsampling round + assertThat(witnessedDownsamplingIndices.contains(oneSecondDownsampleIndex), is(true)); + }, 30, TimeUnit.SECONDS); + + assertBusy(() -> { + assertNoAuthzErrors(); + assertThat(witnessedDownsamplingIndices.size(), is(2)); + assertThat(witnessedDownsamplingIndices.contains(oneSecondDownsampleIndex), is(true)); + + assertThat(witnessedDownsamplingIndices.contains(tenSecondsDownsampleIndex), is(true)); + }, 30, TimeUnit.SECONDS); + + assertBusy(() -> { + assertNoAuthzErrors(); + List<Index> dsBackingIndices = getDataStreamBackingIndices(dataStreamName); + + assertThat(dsBackingIndices.size(), is(2)); + String writeIndex = dsBackingIndices.get(1).getName(); + assertThat(writeIndex, backingIndexEqualTo(dataStreamName, 2)); + // the last downsampling round must remain in the data stream + assertThat(dsBackingIndices.get(0).getName(), is(tenSecondsDownsampleIndex)); + assertThat(indexExists(firstGenerationBackingIndex), is(false)); + assertThat(indexExists(oneSecondDownsampleIndex), is(false)); + }, 30, TimeUnit.SECONDS); + } +
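Each block in `waitAndAssertDownsamplingCompleted` is wrapped in `assertBusy` with a 30-second budget, so the assertions are polled rather than evaluated once. A minimal, self-contained sketch of that retry loop (a hypothetical `BusyAssert` helper, not the ESTestCase implementation):

```java
import java.util.concurrent.TimeUnit;
import java.util.function.BooleanSupplier;

final class BusyAssert {
    // Re-check the condition with exponential backoff until it holds or the budget runs out.
    static void assertBusy(BooleanSupplier condition, long timeout, TimeUnit unit) throws InterruptedException {
        long deadline = System.nanoTime() + unit.toNanos(timeout);
        long sleepMillis = 1;
        while (condition.getAsBoolean() == false) {
            if (System.nanoTime() >= deadline) {
                throw new AssertionError("condition not met within " + timeout + " " + unit);
            }
            Thread.sleep(sleepMillis);
            sleepMillis = Math.min(sleepMillis * 2, 1_000); // cap the backoff at one second
        }
    }
}
```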
+ private Map<String, String> collectErrorsFromStoreAsMap() { + Iterable<DataStreamLifecycleService> lifecycleServices = internalCluster().getInstances(DataStreamLifecycleService.class); + Map<String, String> indicesAndErrors = new HashMap<>(); + for (DataStreamLifecycleService lifecycleService : lifecycleServices) { + DataStreamLifecycleErrorStore errorStore = lifecycleService.getErrorStore(); + List<String> allIndices = errorStore.getAllIndices(); + for (var index : allIndices) { + indicesAndErrors.put(index, errorStore.getError(index)); + } + } + return indicesAndErrors; + } + + private List<Index> getDataStreamBackingIndices(String dataStreamName) { + GetDataStreamAction.Request getDataStreamRequest = new GetDataStreamAction.Request(new String[] { dataStreamName }); + GetDataStreamAction.Response getDataStreamResponse = client().execute(GetDataStreamAction.INSTANCE, getDataStreamRequest) + .actionGet(); + assertThat(getDataStreamResponse.getDataStreams().size(), equalTo(1)); + assertThat(getDataStreamResponse.getDataStreams().get(0).getDataStream().getName(), equalTo(dataStreamName)); + return getDataStreamResponse.getDataStreams().get(0).getDataStream().getIndices(); + } + + private void assertNoAuthzErrors() { + var indicesAndErrors = collectErrorsFromStoreAsMap(); + for (var entry : indicesAndErrors.entrySet()) { + assertThat( + "unexpected authz error for index [" + entry.getKey() + "] with error message [" + entry.getValue() + "]", + entry.getValue(), + not(anyOf(containsString("security_exception"), containsString("unauthorized for user [_data_stream_lifecycle]"))) + ); + } + } + + private void setupDataStreamAndIngestDocs(Client client, String dataStreamName, DataStreamLifecycle lifecycle, int docCount) + throws IOException { + putTSDBIndexTemplate(client, dataStreamName + "*", lifecycle); + indexDocuments(client, dataStreamName, docCount); + } + + private void putTSDBIndexTemplate(Client client, String pattern, DataStreamLifecycle lifecycle) throws IOException { + Settings.Builder settings = indexSettings(1, 0).put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) + .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_1)); + CompressedXContent mapping = getTSDBMappings(); + putComposableIndexTemplate(client, "id1", mapping, List.of(pattern), settings.build(), null, lifecycle); + } + + private static CompressedXContent getTSDBMappings() throws IOException { + XContentBuilder mapping = jsonBuilder().startObject().startObject("_doc").startObject("properties"); + mapping.startObject(FIELD_TIMESTAMP).field("type", "date").endObject(); + + mapping.startObject(FIELD_DIMENSION_1).field("type", "keyword").field("time_series_dimension", true).endObject(); + mapping.startObject(FIELD_DIMENSION_2).field("type", "long").field("time_series_dimension", true).endObject(); + + mapping.startObject(FIELD_METRIC_COUNTER) + .field("type", "double") /* numeric label indexed as a metric */ + .field("time_series_metric", "counter") + .endObject(); + + mapping.endObject().endObject().endObject(); + return CompressedXContent.fromJSON(Strings.toString(mapping)); + } +
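For reference, the `XContentBuilder` chain in `getTSDBMappings` produces a mapping equivalent to the JSON below; the dimension fields carry `time_series_dimension` and the counter carries `time_series_metric`, which is what makes the index eligible for downsampling. (Rendered here as a Java text block; field names match the test constants.)

```java
final class TsdbMappingJson {
    // JSON equivalent of the mapping built in getTSDBMappings()
    static final String MAPPING = """
        {
          "_doc": {
            "properties": {
              "@timestamp":     { "type": "date" },
              "dimension_kw":   { "type": "keyword", "time_series_dimension": true },
              "dimension_long": { "type": "long", "time_series_dimension": true },
              "counter":        { "type": "double", "time_series_metric": "counter" }
            }
          }
        }
        """;
}
```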
+ private void putComposableIndexTemplate( + Client client, + String id, + @Nullable CompressedXContent mappings, + List<String> patterns, + @Nullable Settings settings, + @Nullable Map<String, Object> metadata, + @Nullable DataStreamLifecycle lifecycle + ) { + PutComposableIndexTemplateAction.Request request = new PutComposableIndexTemplateAction.Request(id); + request.indexTemplate( + new ComposableIndexTemplate( + patterns, + new Template(settings, mappings, null, lifecycle), + null, + null, + null, + metadata, + new ComposableIndexTemplate.DataStreamTemplate(), + null + ) + ); + client.execute(PutComposableIndexTemplateAction.INSTANCE, request).actionGet(); + } + + private void indexDocuments(Client client, String dataStreamName, int docCount) { + final Supplier<XContentBuilder> sourceSupplier = () -> { + final String ts = randomDateForInterval(new DateHistogramInterval("1s"), System.currentTimeMillis()); + double counterValue = DATE_FORMATTER.parseMillis(ts); + final List<String> dimensionValues = new ArrayList<>(5); + for (int j = 0; j < randomIntBetween(1, 5); j++) { + dimensionValues.add(randomAlphaOfLength(6)); + } + try { + return XContentFactory.jsonBuilder() + .startObject() + .field(FIELD_TIMESTAMP, ts) + .field(FIELD_DIMENSION_1, randomFrom(dimensionValues)) + .field(FIELD_DIMENSION_2, randomIntBetween(1, 10)) + .field(FIELD_METRIC_COUNTER, counterValue) + .endObject(); + } catch (IOException e) { + throw new RuntimeException(e); + } + }; + bulkIndex(client, dataStreamName, sourceSupplier, docCount); + } + + private String randomDateForInterval(final DateHistogramInterval interval, final long startTime) { + long endTime = startTime + 10 * interval.estimateMillis(); + return randomDateForRange(startTime, endTime); + } + + private String randomDateForRange(long start, long end) { + return DATE_FORMATTER.formatMillis(randomLongBetween(start, end)); + } + + private void bulkIndex(Client client, String dataStreamName, Supplier<XContentBuilder> docSourceSupplier, int docCount) { + BulkRequestBuilder bulkRequestBuilder = client.prepareBulk(); + bulkRequestBuilder.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE); + for (int i = 0; i < docCount; i++) { + IndexRequest indexRequest = new IndexRequest(dataStreamName).opType(DocWriteRequest.OpType.CREATE); + XContentBuilder source = docSourceSupplier.get(); + indexRequest.source(source); + bulkRequestBuilder.add(indexRequest); + } + BulkResponse bulkResponse = bulkRequestBuilder.get(); + int duplicates = 0; + for (BulkItemResponse response : bulkResponse.getItems()) { + if (response.isFailed()) { + if (response.getFailure().getCause() instanceof VersionConflictEngineException) { + // A duplicate event was created by the random generator. We should not fail for this + // reason. + logger.debug("-> failed to insert a duplicate: [{}]", response.getFailureMessage()); + duplicates++; + } else { + throw new ElasticsearchException("Failed to index data: " + bulkResponse.buildFailureMessage()); + } + } + } + int docsIndexed = docCount - duplicates; + logger.info("-> Indexed [{}] documents. Dropped [{}] duplicates.", docsIndexed, duplicates); + } +
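The duplicates tolerated in `bulkIndex` are a direct consequence of `randomDateForInterval`: timestamps are drawn from a window only ten intervals wide, so two documents can collide on identical dimensions and timestamp, which a time-series index rejects as a version conflict. A standalone sketch of that sampling strategy (plain `java.time`; the class name is hypothetical):

```java
import java.time.Instant;
import java.util.concurrent.ThreadLocalRandom;

final class RandomTimestamps {
    // Draw a timestamp uniformly from [start, start + 10 * interval), mirroring
    // the narrow window that makes occasional collisions expected.
    static Instant randomDateForInterval(long intervalMillis, long startMillis) {
        long endMillis = startMillis + 10 * intervalMillis;
        return Instant.ofEpochMilli(ThreadLocalRandom.current().nextLong(startMillis, endMillis));
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        System.out.println(randomDateForInterval(1_000, now)); // somewhere in the next ~10s
    }
}
```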
+ public static class SystemDataStreamTestPlugin extends Plugin implements SystemIndexPlugin { + + static final String SYSTEM_DATA_STREAM_NAME = ".fleet-actions-results"; + + @Override + public Collection<SystemDataStreamDescriptor> getSystemDataStreamDescriptors() { + Settings.Builder settings = indexSettings(1, 0).put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) + .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_1)); + + try { + return List.of( + new SystemDataStreamDescriptor( + SYSTEM_DATA_STREAM_NAME, + "a system data stream for testing", + SystemDataStreamDescriptor.Type.EXTERNAL, + new ComposableIndexTemplate( + List.of(SYSTEM_DATA_STREAM_NAME), + new Template(settings.build(), getTSDBMappings(), null, null), + null, + null, + null, + null, + new ComposableIndexTemplate.DataStreamTemplate() + ), + Map.of(), + Collections.singletonList("test"), + new ExecutorNames( + ThreadPool.Names.SYSTEM_CRITICAL_READ, + ThreadPool.Names.SYSTEM_READ, + ThreadPool.Names.SYSTEM_WRITE + ) + ) + ); + } catch (IOException e) { + throw new RuntimeException("Unable to create system data stream descriptor", e); + } + } + + @Override + public String getFeatureName() { + return SystemDataStreamTestPlugin.class.getSimpleName(); + } + + @Override + public String getFeatureDescription() { + return "A plugin for testing the data stream lifecycle runtime actions on system data streams"; + } + } + + public static class SystemDataStreamWithDownsamplingConfigurationPlugin extends Plugin implements SystemIndexPlugin { + + static final String SYSTEM_DATA_STREAM_NAME = ".fleet-actions-results"; + + @Override + public Collection<SystemDataStreamDescriptor> getSystemDataStreamDescriptors() { + DataStreamLifecycle lifecycle = DataStreamLifecycle.newBuilder() + .downsampling( + new DataStreamLifecycle.Downsampling( + List.of( + new DataStreamLifecycle.Downsampling.Round( + TimeValue.timeValueMillis(0), + new DownsampleConfig(new DateHistogramInterval("1s")) + ), + new DataStreamLifecycle.Downsampling.Round( + TimeValue.timeValueSeconds(10), + new DownsampleConfig(new DateHistogramInterval("10s")) + ) + ) + ) + ) + .build(); + + Settings.Builder settings = indexSettings(1, 0).put(IndexSettings.MODE.getKey(), IndexMode.TIME_SERIES) + .putList(IndexMetadata.INDEX_ROUTING_PATH.getKey(), List.of(FIELD_DIMENSION_1)); + + try { + return List.of( + new SystemDataStreamDescriptor( + SYSTEM_DATA_STREAM_NAME, + "a system data stream for testing", + SystemDataStreamDescriptor.Type.EXTERNAL, + new ComposableIndexTemplate( + List.of(SYSTEM_DATA_STREAM_NAME), + new Template(settings.build(), getTSDBMappings(), null, lifecycle), + null, + null, + null, + null, + new ComposableIndexTemplate.DataStreamTemplate() + ), + Map.of(), + Collections.singletonList("test"), + new ExecutorNames( + ThreadPool.Names.SYSTEM_CRITICAL_READ, + ThreadPool.Names.SYSTEM_READ, + ThreadPool.Names.SYSTEM_WRITE + ) + ) + ); + } catch (IOException e) { + throw new RuntimeException("Unable to create system data stream descriptor", e); + } + } + + @Override + public String getFeatureName() { + return SystemDataStreamTestPlugin.class.getSimpleName(); + } + + @Override + public String getFeatureDescription() { + return "A plugin for testing the data stream lifecycle runtime actions on system data streams"; + } + } +} diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/transport/SecurityServerTransportServiceTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/transport/SecurityServerTransportServiceTests.java index b359824505e5f..a9cded1783cac 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/transport/SecurityServerTransportServiceTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/transport/SecurityServerTransportServiceTests.java @@ -22,13 +22,7 @@ public void testSecurityServerTransportServiceWrapsAllHandlers() { + SecurityServerTransportInterceptor.ProfileSecuredRequestHandler.class + "; do all the handler registration methods have overrides?", handler.toString(), - "ProfileSecuredRequestHandler{action='" - + handler.getAction() - + "', executorName='" - + handler.getExecutor() - + "', forceExecution=" - + handler.isForceExecution() - + "}" + "ProfileSecuredRequestHandler{action='" + handler.getAction() + "', forceExecution=" + handler.isForceExecution() + "}" ); } } diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsStartupIT.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsStartupIT.java index cea831ea6d0a9..7b02495c7227b 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsStartupIT.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/FileSettingsRoleMappingsStartupIT.java @@ -22,6 +22,7 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.InternalSettingsPlugin; import org.elasticsearch.test.SecurityIntegTestCase; +import org.elasticsearch.test.junit.annotations.TestLogging; import org.elasticsearch.transport.netty4.Netty4Plugin; import org.elasticsearch.xpack.wildcard.Wildcard; @@ -111,10 +112,12 @@ 
public void clusterChanged(ClusterChangedEvent event) { return new Tuple<>(savedClusterState, metadataVersion); } + @TestLogging(value = "org.elasticsearch.common.file:DEBUG", reason = "https://github.com/elastic/elasticsearch/issues/98391") public void testFailsOnStartMasterNodeWithError() throws Exception { internalCluster().setBootstrapMasterNodeIndex(0); internalCluster().startMasterOnlyNode(); + logger.info("--> write some role mappings, no other file settings"); writeJSONFile(internalCluster().getMasterName(), testJSONForFailedCase); var savedClusterState = setupClusterStateListenerForError(internalCluster().getMasterName()); diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java index d970ff034438c..9d9725ed16ed8 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/authc/TokenAuthIntegTests.java @@ -8,6 +8,7 @@ import org.apache.http.client.methods.HttpPost; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.action.support.WriteRequest; @@ -323,8 +324,8 @@ public void testInvalidateNotValidAccessTokens() throws Exception { ResponseException.class, () -> invalidateAccessToken( tokenService.prependVersionAndEncodeAccessToken( - TransportVersion.V_7_3_2, - tokenService.getRandomTokenBytes(TransportVersion.V_7_3_2, randomBoolean()).v1() + TransportVersions.V_7_3_2, + tokenService.getRandomTokenBytes(TransportVersions.V_7_3_2, randomBoolean()).v1() ) ) ); @@ -343,7 +344,7 @@ public void testInvalidateNotValidAccessTokens() throws Exception { byte[] longerAccessToken = new byte[randomIntBetween(17, 24)]; random().nextBytes(longerAccessToken); invalidateResponse = invalidateAccessToken( - tokenService.prependVersionAndEncodeAccessToken(TransportVersion.V_7_3_2, longerAccessToken) + tokenService.prependVersionAndEncodeAccessToken(TransportVersions.V_7_3_2, longerAccessToken) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); assertThat(invalidateResponse.previouslyInvalidated(), equalTo(0)); @@ -361,7 +362,7 @@ public void testInvalidateNotValidAccessTokens() throws Exception { byte[] shorterAccessToken = new byte[randomIntBetween(12, 15)]; random().nextBytes(shorterAccessToken); invalidateResponse = invalidateAccessToken( - tokenService.prependVersionAndEncodeAccessToken(TransportVersion.V_7_3_2, shorterAccessToken) + tokenService.prependVersionAndEncodeAccessToken(TransportVersions.V_7_3_2, shorterAccessToken) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); assertThat(invalidateResponse.previouslyInvalidated(), equalTo(0)); @@ -390,8 +391,8 @@ public void testInvalidateNotValidAccessTokens() throws Exception { invalidateResponse = invalidateAccessToken( tokenService.prependVersionAndEncodeAccessToken( - TransportVersion.V_7_3_2, - tokenService.getRandomTokenBytes(TransportVersion.V_7_3_2, randomBoolean()).v1() + TransportVersions.V_7_3_2, + tokenService.getRandomTokenBytes(TransportVersions.V_7_3_2, randomBoolean()).v1() ) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); @@ -416,8 +417,8 @@ public void 
testInvalidateNotValidRefreshTokens() throws Exception { ResponseException.class, () -> invalidateRefreshToken( TokenService.prependVersionAndEncodeRefreshToken( - TransportVersion.V_7_3_2, - tokenService.getRandomTokenBytes(TransportVersion.V_7_3_2, true).v2() + TransportVersions.V_7_3_2, + tokenService.getRandomTokenBytes(TransportVersions.V_7_3_2, true).v2() ) ) ); @@ -437,7 +438,7 @@ public void testInvalidateNotValidRefreshTokens() throws Exception { byte[] longerRefreshToken = new byte[randomIntBetween(17, 24)]; random().nextBytes(longerRefreshToken); invalidateResponse = invalidateRefreshToken( - TokenService.prependVersionAndEncodeRefreshToken(TransportVersion.V_7_3_2, longerRefreshToken) + TokenService.prependVersionAndEncodeRefreshToken(TransportVersions.V_7_3_2, longerRefreshToken) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); assertThat(invalidateResponse.previouslyInvalidated(), equalTo(0)); @@ -455,7 +456,7 @@ public void testInvalidateNotValidRefreshTokens() throws Exception { byte[] shorterRefreshToken = new byte[randomIntBetween(12, 15)]; random().nextBytes(shorterRefreshToken); invalidateResponse = invalidateRefreshToken( - TokenService.prependVersionAndEncodeRefreshToken(TransportVersion.V_7_3_2, shorterRefreshToken) + TokenService.prependVersionAndEncodeRefreshToken(TransportVersions.V_7_3_2, shorterRefreshToken) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); assertThat(invalidateResponse.previouslyInvalidated(), equalTo(0)); @@ -484,8 +485,8 @@ public void testInvalidateNotValidRefreshTokens() throws Exception { invalidateResponse = invalidateRefreshToken( TokenService.prependVersionAndEncodeRefreshToken( - TransportVersion.V_7_3_2, - tokenService.getRandomTokenBytes(TransportVersion.V_7_3_2, true).v2() + TransportVersions.V_7_3_2, + tokenService.getRandomTokenBytes(TransportVersions.V_7_3_2, true).v2() ) ); assertThat(invalidateResponse.invalidated(), equalTo(0)); @@ -755,15 +756,15 @@ public void testAuthenticateWithWrongToken() throws Exception { // Now attempt to authenticate with an invalid access token with valid structure (pre 7.2) assertUnauthorizedToken( tokenService.prependVersionAndEncodeAccessToken( - TransportVersion.V_7_1_0, - tokenService.getRandomTokenBytes(TransportVersion.V_7_1_0, randomBoolean()).v1() + TransportVersions.V_7_1_0, + tokenService.getRandomTokenBytes(TransportVersions.V_7_1_0, randomBoolean()).v1() ) ); // Now attempt to authenticate with an invalid access token with valid structure (after 7.2 pre 8.10) assertUnauthorizedToken( tokenService.prependVersionAndEncodeAccessToken( - TransportVersion.V_7_4_0, - tokenService.getRandomTokenBytes(TransportVersion.V_7_4_0, randomBoolean()).v1() + TransportVersions.V_7_4_0, + tokenService.getRandomTokenBytes(TransportVersions.V_7_4_0, randomBoolean()).v1() ) ); // Now attempt to authenticate with an invalid access token with valid structure (current version) diff --git a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java index e698b9de59079..165235181de41 100644 --- a/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java +++ b/x-pack/plugin/security/src/internalClusterTest/java/org/elasticsearch/xpack/security/profile/ProfileIntegTests.java @@ -834,6 +834,17 @@ public void testGetUsersWithProfileUid() throws IOException { ); } + 
public void testGetUsersWithProfileUidWhenProfileIndexDoesNotExists() { + final GetUsersRequest getUsersRequest = new GetUsersRequest(); + getUsersRequest.setWithProfileUid(true); + if (randomBoolean()) { + getUsersRequest.usernames(ElasticUser.NAME, RAC_USER_NAME); + } + final GetUsersResponse getUsersResponse = client().execute(GetUsersAction.INSTANCE, getUsersRequest).actionGet(); + // When profile index does not exist, profile lookup is null + assertThat(getUsersResponse.getProfileUidLookup(), nullValue()); + } + private SuggestProfilesResponse.ProfileHit[] doSuggest(String name) { return doSuggest(name, Set.of()); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java index 1492dcaf687dc..ea9e7059c7ea8 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/core/security/transport/netty4/SecurityNetty4Transport.java @@ -19,6 +19,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionListener; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.network.NetworkService; @@ -26,10 +27,13 @@ import org.elasticsearch.common.ssl.SslConfiguration; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.ConnectTransportException; import org.elasticsearch.transport.ConnectionProfile; +import org.elasticsearch.transport.Header; +import org.elasticsearch.transport.HeaderValidationException; import org.elasticsearch.transport.InboundAggregator; import org.elasticsearch.transport.InboundDecoder; import org.elasticsearch.transport.InboundPipeline; @@ -42,6 +46,7 @@ import org.elasticsearch.xpack.core.security.transport.ProfileConfigurations; import org.elasticsearch.xpack.core.security.transport.SecurityTransportExceptionHandler; import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.CrossClusterAccessAuthenticationService; import java.net.InetSocketAddress; import java.net.SocketAddress; @@ -53,6 +58,7 @@ import javax.net.ssl.SSLEngine; import javax.net.ssl.SSLParameters; +import static org.elasticsearch.transport.InboundDecoder.ChannelType.SERVER; import static org.elasticsearch.transport.RemoteClusterPortSettings.REMOTE_CLUSTER_PROFILE; import static org.elasticsearch.xpack.core.XPackSettings.REMOTE_CLUSTER_CLIENT_SSL_ENABLED; import static org.elasticsearch.xpack.core.XPackSettings.REMOTE_CLUSTER_CLIENT_SSL_PREFIX; @@ -72,6 +78,7 @@ public class SecurityNetty4Transport extends Netty4Transport { private final boolean remoteClusterServerSslEnabled; private final SslConfiguration remoteClusterClientSslConfiguration; private final RemoteClusterClientBootstrapOptions remoteClusterClientBootstrapOptions; + private final CrossClusterAccessAuthenticationService crossClusterAccessAuthenticationService; public SecurityNetty4Transport( final Settings 
settings, @@ -82,7 +89,8 @@ public SecurityNetty4Transport( final NamedWriteableRegistry namedWriteableRegistry, final CircuitBreakerService circuitBreakerService, final SSLService sslService, - final SharedGroupFactory sharedGroupFactory + final SharedGroupFactory sharedGroupFactory, + final CrossClusterAccessAuthenticationService crossClusterAccessAuthenticationService ) { super( settings, @@ -94,6 +102,7 @@ public SecurityNetty4Transport( circuitBreakerService, sharedGroupFactory ); + this.crossClusterAccessAuthenticationService = crossClusterAccessAuthenticationService; this.exceptionHandler = new SecurityTransportExceptionHandler(logger, lifecycle, (c, e) -> super.onException(c, e)); this.sslService = sslService; this.transportSslEnabled = XPackSettings.TRANSPORT_SSL_ENABLED.get(settings); @@ -150,16 +159,51 @@ protected ChannelHandler getClientChannelInitializer(DiscoveryNode node, Connect } @Override - protected InboundPipeline getInboundPipeline(boolean isRemoteClusterServerChannel) { - return new InboundPipeline( - getStatsTracker(), - threadPool::relativeTimeInMillis, - isRemoteClusterServerChannel - ? new InboundDecoder(recycler, RemoteClusterPortSettings.MAX_REQUEST_HEADER_SIZE.get(settings)) - : new InboundDecoder(recycler), - new InboundAggregator(getInflightBreaker(), getRequestHandlers()::getHandler, ignoreDeserializationErrors()), - this::inboundMessage - ); + protected InboundPipeline getInboundPipeline(Channel channel, boolean isRemoteClusterServerChannel) { + if (false == isRemoteClusterServerChannel) { + return super.getInboundPipeline(channel, false); + } else { + return new InboundPipeline( + getStatsTracker(), + threadPool::relativeTimeInMillis, + new InboundDecoder(recycler, RemoteClusterPortSettings.MAX_REQUEST_HEADER_SIZE.get(settings), SERVER), + new InboundAggregator(getInflightBreaker(), getRequestHandlers()::getHandler, ignoreDeserializationErrors()), + this::inboundMessage + ) { + @Override + protected void headerReceived(Header header) { + if (header.isHandshake() == false) { + // eagerly (before buffering the full request) authenticate all request headers for this type of channel + assert header.isRequest(); + // authn is mostly async; avoid buffering any more data while authn is in progress + channel.config().setAutoRead(false); + // this prevents thread-context changes from propagating beyond the validation, as netty worker threads are reused + try (ThreadContext.StoredContext ignore = threadPool.getThreadContext().newStoredContext()) { + crossClusterAccessAuthenticationService.tryAuthenticate( + header.getRequestHeaders(), + ActionListener.runAfter(ActionListener.wrap(aVoid -> { + // authn is successful -> NOOP (the complete request will be subsequently authn & authz & audited) + // Header#toString does not print credentials (which are stored in request headers) + logger.debug("Transport CCS authentication SUCCESS for [{}] on channel [{}]", header, channel); + }, e -> { + // Header#toString does not print credentials (which are stored in request headers) + logger.debug( + "Transport CCS authentication FAIL for [{}] with [{}], closing channel [{}]", + header, + e.getMessage(), + channel + ); + channel.eventLoop() + .submit(() -> channel.pipeline().fireExceptionCaught(new HeaderValidationException(header, e))); + }), () -> channel.config().setAutoRead(true)) + ); + } + } + // go on with the message parts + super.headerReceived(header); + } + }; + } } @Override
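The override above is essentially a flow-control handshake: reads are paused with `setAutoRead(false)` before the asynchronous credential check starts, and re-enabled in a `runAfter`-style finalizer regardless of the outcome, so a slow authentication cannot let unauthenticated bytes pile up. A framework-free sketch of the pattern (the `Channel` interface below is a hypothetical stand-in for the Netty channel API, not the real one):

```java
import java.util.concurrent.CompletableFuture;

// Hypothetical channel abstraction: just the two operations the pattern needs.
interface Channel {
    void setAutoRead(boolean autoRead);
    void failAndClose(Exception failure);
}

final class EagerHeaderAuth {
    static void onHeaderReceived(Channel channel, CompletableFuture<Void> authentication, Runnable continueParsing) {
        channel.setAutoRead(false); // stop reading while authn is in flight
        authentication.whenComplete((ignored, failure) -> {
            channel.setAutoRead(true); // always resume, success or not
            if (failure != null) {
                channel.failAndClose(new Exception("header authentication failed", failure));
            }
        });
        continueParsing.run(); // header handling itself proceeds immediately
    }
}
```

diff --git 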
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/InitialNodeSecurityAutoConfiguration.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/InitialNodeSecurityAutoConfiguration.java index 866a3bf1bad48..e84f6f3efeadb 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/InitialNodeSecurityAutoConfiguration.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/InitialNodeSecurityAutoConfiguration.java @@ -187,7 +187,7 @@ protected void doRun() { } }, backoff); } - }, TimeValue.timeValueSeconds(9), ThreadPool.Names.GENERIC)); + }, TimeValue.timeValueSeconds(9), threadPool.generic())); } }); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java index 13796249e1771..67feb5c73213c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/Security.java @@ -391,6 +391,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.concurrent.Executor; import java.util.function.BiConsumer; import java.util.function.Function; import java.util.function.Predicate; @@ -1581,7 +1582,7 @@ public List getTransportInterceptors(NamedWriteableRegistr @Override public TransportRequestHandler interceptHandler( String action, - String executor, + Executor executor, boolean forceExecution, TransportRequestHandler actualHandler ) { @@ -1626,7 +1627,8 @@ public Map> getTransports( circuitBreakerService, ipFilter, getSslService(), - getNettySharedGroupFactory(settings) + getNettySharedGroupFactory(settings), + crossClusterAccessAuthcService.get() ) ); return transportReference.get(); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersAction.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersAction.java index 2cfb1597f3b60..1f817ad9b6f2f 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersAction.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersAction.java @@ -144,7 +144,10 @@ private void resolveProfileUids(List users, ActionListener { - if (resultsAndErrors.errors().isEmpty()) { + if (resultsAndErrors == null) { + // profile index does not exist + listener.onResponse(null); + } else if (resultsAndErrors.errors().isEmpty()) { assert users.size() == resultsAndErrors.results().size(); final Map profileUidLookup = resultsAndErrors.results() .stream() diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java index bc432ac09c9dc..999a93c8b58e0 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrail.java @@ -104,7 +104,7 @@ void explicitIndexAccessEvent( AuditLevel eventType, Authentication authentication, String action, - String indices, + String[] indices, String requestName, InetSocketAddress remoteAddress, AuthorizationInfo authorizationInfo diff --git 
a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java index 165b3f6f0537c..6fc70832cc383 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/AuditTrailService.java @@ -186,7 +186,7 @@ public void explicitIndexAccessEvent( AuditLevel eventType, Authentication authentication, String action, - String indices, + String[] indices, String requestName, InetSocketAddress remoteAddress, AuthorizationInfo authorizationInfo diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java index e74e0671eb4d2..1b4f4b891c7ea 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrail.java @@ -811,13 +811,12 @@ public void explicitIndexAccessEvent( AuditLevel eventType, Authentication authentication, String action, - String index, + String[] indices, String requestName, InetSocketAddress remoteAddress, AuthorizationInfo authorizationInfo ) { assert eventType == ACCESS_DENIED || eventType == AuditLevel.ACCESS_GRANTED || eventType == SYSTEM_ACCESS_GRANTED; - final String[] indices = index == null ? null : new String[] { index }; final User user = authentication.getEffectiveSubject().getUser(); if (user instanceof InternalUser && eventType == ACCESS_GRANTED) { eventType = SYSTEM_ACCESS_GRANTED; @@ -830,7 +829,7 @@ public void explicitIndexAccessEvent( // can be null for API keys created before version 7.7 Optional.ofNullable(ApiKeyService.getCreatorRealmName(authentication)), Optional.of(authorizationInfo), - Optional.ofNullable(indices), + Optional.of(indices), Optional.of(action) ) ) == false) { diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationService.java index 0e817d55e5be4..866bac68c33dd 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationService.java @@ -112,6 +112,17 @@ public void authenticate(final String action, final TransportRequest request, fi } } + public void tryAuthenticate(Map<String, String> headers, ActionListener<Void> listener) { + final ApiKeyService.ApiKeyCredentials credentials; + try { + credentials = extractApiKeyCredentialsFromHeaders(headers); + } catch (Exception e) { + listener.onFailure(e); + return; + } + tryAuthenticate(credentials, listener); + } + public void tryAuthenticate(ApiKeyService.ApiKeyCredentials credentials, ActionListener<Void> listener) { Objects.requireNonNull(credentials); apiKeyService.tryAuthenticate(clusterService.threadPool().getThreadContext(), credentials, ActionListener.wrap(authResult -> { @@ -146,7 +157,7 @@ public void tryAuthenticate(ApiKeyService.ApiKeyCredentials credentials, ActionL public ApiKeyService.ApiKeyCredentials extractApiKeyCredentialsFromHeaders(Map<String, String> headers) { try { 
apiKeyService.ensureEnabled(); - final String credentials = headers.get(CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY); + final String credentials = headers == null ? null : headers.get(CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY); if (credentials == null) { throw requiredHeaderMissingException(CROSS_CLUSTER_ACCESS_CREDENTIALS_HEADER_KEY); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java index 79a211047e182..e543248b8ad1d 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/TokenService.java @@ -16,6 +16,7 @@ import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest.OpType; import org.elasticsearch.action.DocWriteResponse; @@ -144,7 +145,6 @@ import static org.elasticsearch.core.Strings.format; import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK; import static org.elasticsearch.search.SearchService.DEFAULT_KEEPALIVE_SETTING; -import static org.elasticsearch.threadpool.ThreadPool.Names.GENERIC; import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN; import static org.elasticsearch.xpack.core.ClientHelper.executeAsyncWithOrigin; @@ -203,12 +203,12 @@ public final class TokenService { static final int MINIMUM_BYTES = VERSION_BYTES + TOKEN_LENGTH + 1; static final int LEGACY_MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * LEGACY_MINIMUM_BYTES) / 3)).intValue(); public static final int MINIMUM_BASE64_BYTES = Double.valueOf(Math.ceil((4 * MINIMUM_BYTES) / 3)).intValue(); - static final TransportVersion VERSION_HASHED_TOKENS = TransportVersion.V_7_2_0; - static final TransportVersion VERSION_TOKENS_INDEX_INTRODUCED = TransportVersion.V_7_2_0; - static final TransportVersion VERSION_ACCESS_TOKENS_AS_UUIDS = TransportVersion.V_7_2_0; - static final TransportVersion VERSION_MULTIPLE_CONCURRENT_REFRESHES = TransportVersion.V_7_2_0; - static final TransportVersion VERSION_CLIENT_AUTH_FOR_REFRESH = TransportVersion.V_8_2_0; - static final TransportVersion VERSION_GET_TOKEN_DOC_FOR_REFRESH = TransportVersion.V_8_500_040; + static final TransportVersion VERSION_HASHED_TOKENS = TransportVersions.V_7_2_0; + static final TransportVersion VERSION_TOKENS_INDEX_INTRODUCED = TransportVersions.V_7_2_0; + static final TransportVersion VERSION_ACCESS_TOKENS_AS_UUIDS = TransportVersions.V_7_2_0; + static final TransportVersion VERSION_MULTIPLE_CONCURRENT_REFRESHES = TransportVersions.V_7_2_0; + static final TransportVersion VERSION_CLIENT_AUTH_FOR_REFRESH = TransportVersions.V_8_2_0; + static final TransportVersion VERSION_GET_TOKEN_DOC_FOR_REFRESH = TransportVersions.V_8_500_040; private static final Logger logger = LogManager.getLogger(TokenService.class); @@ -1004,7 +1004,7 @@ private void indexInvalidation( listener ), backoff.next(), - GENERIC + client.threadPool().generic() ); } else { if (retryTokenDocIds.isEmpty() == false) { @@ -1048,7 +1048,7 @@ private void indexInvalidation( listener ), backoff.next(), - GENERIC + client.threadPool().generic() ); } else { listener.onFailure(e); @@ -1161,7 +1161,7 @@ private void findTokenFromRefreshToken( 
.schedule( () -> findTokenFromRefreshToken(refreshToken, tokensIndexManager, backoff, listener), backofTimeValue, - GENERIC + client.threadPool().generic() ); } else { logger.warn("failed to find token from refresh token after all retries"); @@ -1331,7 +1331,7 @@ private void innerRefresh( .schedule( () -> innerRefresh(refreshToken, tokenDoc, clientAuth, backoff, refreshRequested, listener), backoff.next(), - GENERIC + client.threadPool().generic() ); } else { logger.info( @@ -1367,7 +1367,7 @@ public void onFailure(Exception e) { .schedule( () -> getTokenDocAsync(tokenDoc.id(), refreshedTokenIndex, true, this), backoff.next(), - GENERIC + client.threadPool().generic() ); } else { logger.warn("could not get token document [{}] for refresh after all retries", tokenDoc.id()); @@ -1385,7 +1385,7 @@ public void onFailure(Exception e) { .schedule( () -> innerRefresh(refreshToken, tokenDoc, clientAuth, backoff, refreshRequested, listener), backoff.next(), - GENERIC + client.threadPool().generic() ); } else { logger.warn("failed to update the original token document [{}], after all retries", tokenDoc.id()); @@ -1455,7 +1455,11 @@ void decryptAndReturnSupersedingTokens( if (backoff.hasNext()) { logger.info("could not get token document [{}] that should have been created, retrying", tokenDocId); client.threadPool() - .schedule(() -> getTokenDocAsync(tokenDocId, tokensIndex, false, actionListener), backoff.next(), GENERIC); + .schedule( + () -> getTokenDocAsync(tokenDocId, tokensIndex, false, actionListener), + backoff.next(), + client.threadPool().generic() + ); } else { logger.warn("could not get token document [{}] that should have been created after all retries", tokenDocId); onFailure.accept(invalidGrantException("could not refresh the requested token")); diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java index 5c04da1b3bc9b..9b66a1353ee78 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealm.java @@ -374,7 +374,8 @@ private User tryAuthenticateWithCache(final String tokenPrincipal, final BytesAr return null; } - private void processValidatedJwt( + // package private for testing + void processValidatedJwt( String tokenPrincipal, BytesArray jwtCacheKey, JWTClaimsSet claimsSet, @@ -460,7 +461,7 @@ private void invalidateJwtCache() { if (isCacheEnabled()) { try { logger.trace("Invalidating JWT cache for realm [{}]", name()); - try (ReleasableLock ignored = jwtCacheHelper.acquireUpdateLock()) { + try (ReleasableLock ignored = jwtCacheHelper.acquireForIterator()) { jwtCache.invalidateAll(); } logger.debug("Invalidated JWT cache for realm [{}]", name()); @@ -493,6 +494,16 @@ private Map buildUserMetadata(JWTClaimsSet claimsSet) { return Map.copyOf(metadata); } + // We construct the token principal as a function of the JWT realm configuration. We also short circuit the extraction of the + // token principal while we iterate through the realms. For realms like the file realm this is not an issue since there is only + // one file realm. For realms like LDAP this is also not an issue since the token principal is identical across all realms regardless + // of how the realm is configured. 
However, for realms like JWT (and PKI realm) where the token principal is a function of the + // realm configuration AND multiple realms of that type can exist, this can be an issue: realm1 might + // result in the token principal "abc", but realm2 (same JWT) might result in the token principal "xyz". Since we short-circuit the + // extraction of the token principal (i.e. use the first one that does not error), the same JWT can result in a + // token principal of either "abc" or "xyz" depending on which realm came first. This means that we cannot rely on the value calculated here + // to be logically correct within the context of a given realm. The value is technically correct, since it is a function of + // the JWT itself, but which function (realm1's or realm2's) cannot be known. The value emitted here should be used judiciously. private String buildTokenPrincipal(JWTClaimsSet jwtClaimsSet) { final Map<String, String> fallbackClaimNames = jwtAuthenticator.getFallbackClaimNames(); final FallbackableClaim subClaim = new FallbackableClaim("sub", fallbackClaimNames, jwtClaimsSet);
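The ordering hazard described in this comment is easy to reproduce in miniature: model each realm as a claims-to-principal function and short-circuit on the first non-null answer, and the resulting principal depends purely on realm order. A self-contained illustration (all names hypothetical; claims modeled as a plain Map):

```java
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.function.Function;

final class TokenPrincipalOrdering {
    static Optional<String> firstPrincipal(List<Function<Map<String, String>, String>> realms, Map<String, String> claims) {
        for (Function<Map<String, String>, String> realm : realms) {
            String principal = realm.apply(claims);
            if (principal != null) {
                return Optional.of(principal); // short-circuits at the first realm that succeeds
            }
        }
        return Optional.empty();
    }

    public static void main(String[] args) {
        Map<String, String> claims = Map.of("sub", "abc", "email", "xyz@example.com");
        Function<Map<String, String>, String> realm1 = c -> c.get("sub");
        Function<Map<String, String>, String> realm2 = c -> c.get("email");
        // Same JWT, different principal, depending solely on which realm is asked first.
        System.out.println(firstPrincipal(List.of(realm1, realm2), claims)); // Optional[abc]
        System.out.println(firstPrincipal(List.of(realm2, realm1), claims)); // Optional[xyz@example.com]
    }
}
```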
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java index 83a5046f412ac..c5894274a469c 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authc/ldap/LdapRealm.java @@ -14,12 +14,12 @@ import org.elasticsearch.action.support.ContextPreservingActionListener; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.IOUtils; import org.elasticsearch.core.TimeValue; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.threadpool.ThreadPool; -import org.elasticsearch.threadpool.ThreadPool.Names; import org.elasticsearch.watcher.ResourceWatcherService; import org.elasticsearch.xpack.core.security.authc.AuthenticationResult; import org.elasticsearch.xpack.core.security.authc.Realm; @@ -159,7 +159,7 @@ protected void doAuthenticate(UsernamePasswordToken token, ActionListener userActionList logger ); threadPool.generic().execute(cancellableLdapRunnable); - threadPool.schedule(cancellableLdapRunnable::maybeTimeout, executionTimeout, Names.SAME); + threadPool.schedule(cancellableLdapRunnable::maybeTimeout, executionTimeout, EsExecutors.DIRECT_EXECUTOR_SERVICE); } else { userActionListener.onResponse(null); } diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java index 7edad950b8491..142490aa90331 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/authz/AuthorizationService.java @@ -750,32 +750,16 @@ private void authorizeBulkItems( final BulkShardRequest request = (BulkShardRequest) requestInfo.getRequest(); // Maps original-index -> expanded-index-name (expands date-math, but not aliases) final Map<String, String> resolvedIndexNames = new HashMap<>(); - // Maps action -> resolved indices set - final Map<String, Set<String>> actionToIndicesMap = new HashMap<>(); + // Maps action -> resolved indices set (there are 4 action types total) + final Map<String, Set<String>> actionToIndicesMap = new HashMap<>(4); final AuditTrail auditTrail = auditTrailService.get(); resolvedIndicesAsyncSupplier.getAsync(ActionListener.wrap(overallResolvedIndices -> { final Set<String> localIndices = new HashSet<>(overallResolvedIndices.getLocal()); for (BulkItemRequest item : request.items()) { final String itemAction = getAction(item); - String resolvedIndex = resolvedIndexNames.computeIfAbsent(item.index(), key -> { - final ResolvedIndices resolvedIndices = IndicesAndAliasesResolver.resolveIndicesAndAliasesWithoutWildcards( - itemAction, - item.request() - ); - if (resolvedIndices.getRemote().size() != 0) { - throw illegalArgument( - "Bulk item should not write to remote indices, but request writes to " - + String.join(",", resolvedIndices.getRemote()) - ); - } - if (resolvedIndices.getLocal().size() != 1) { - throw illegalArgument( - "Bulk item should write to exactly 1 index, but request writes to " - + String.join(",", resolvedIndices.getLocal()) - ); - } - final String resolved = resolvedIndices.getLocal().get(0); + final String resolvedIndex = resolvedIndexNames.computeIfAbsent(item.index(), key -> { + final String resolved = resolveIndexNameDateMath(item); if (localIndices.contains(resolved) == false) { throw illegalArgument( "Found bulk item that writes to index " + resolved + " but the request writes to " + localIndices @@ -783,17 +767,12 @@ private void authorizeBulkItems( } return resolved; }); - - actionToIndicesMap.compute(itemAction, (key, resolvedIndicesSet) -> { - final Set<String> localSet = resolvedIndicesSet != null ? resolvedIndicesSet : new HashSet<>(); - localSet.add(resolvedIndex); - return localSet; - }); + actionToIndicesMap.compute(itemAction, (ignore, resolvedIndicesSet) -> addToOrCreateSet(resolvedIndicesSet, resolvedIndex)); } final ActionListener<Collection<Tuple<String, IndicesAccessControl>>> bulkAuthzListener = ActionListener.wrap( collection -> { - final Map<String, IndicesAccessControl> actionToIndicesAccessControl = new HashMap<>(); + final Map<String, IndicesAccessControl> actionToIndicesAccessControl = new HashMap<>(4); collection.forEach(tuple -> { final IndicesAccessControl existing = actionToIndicesAccessControl.putIfAbsent( tuple.v1(), @@ -804,23 +783,20 @@ private void authorizeBulkItems( } }); + final Map<String, Set<String>> actionToGrantedIndicesMap = new HashMap<>(4); + final Map<String, Set<String>> actionToDeniedIndicesMap = new HashMap<>(4); for (BulkItemRequest item : request.items()) { final String resolvedIndex = resolvedIndexNames.get(item.index()); final String itemAction = getAction(item); - final IndicesAccessControl indicesAccessControl = actionToIndicesAccessControl.get(itemAction); - final IndicesAccessControl.IndexAccessControl indexAccessControl = indicesAccessControl.getIndexPermissions( - resolvedIndex - ); - if (indexAccessControl == null) { - auditTrail.explicitIndexAccessEvent( - requestId, - AuditLevel.ACCESS_DENIED, - authentication, + if (actionToIndicesAccessControl.get(itemAction).hasIndexPermissions(resolvedIndex)) { + actionToGrantedIndicesMap.compute( itemAction, - resolvedIndex, - item.getClass().getSimpleName(), - request.remoteAddress(), - authzInfo + (ignore, resolvedIndicesSet) -> addToOrCreateSet(resolvedIndicesSet, resolvedIndex) + ); + } else { + actionToDeniedIndicesMap.compute( + itemAction, + (ignore, resolvedIndicesSet) -> addToOrCreateSet(resolvedIndicesSet, resolvedIndex) ); item.abort( resolvedIndex, @@ -833,19 +809,32 @@ private void authorizeBulkItems( null ) ); - } else { - auditTrail.explicitIndexAccessEvent( - requestId, - AuditLevel.ACCESS_GRANTED, - authentication, - itemAction, - resolvedIndex, - item.getClass().getSimpleName(), - request.remoteAddress(), - authzInfo - ); } } + actionToDeniedIndicesMap.forEach((action, resolvedIndicesSet) -> { + auditTrail.explicitIndexAccessEvent( + requestId, + AuditLevel.ACCESS_DENIED, + authentication, + action, + resolvedIndicesSet.toArray(new String[0]), + BulkItemRequest.class.getSimpleName(), + request.remoteAddress(), + authzInfo + ); + }); + actionToGrantedIndicesMap.forEach((action, resolvedIndicesSet) -> { + auditTrail.explicitIndexAccessEvent( + requestId, + AuditLevel.ACCESS_GRANTED, + authentication, + action, + resolvedIndicesSet.toArray(new String[0]), + BulkItemRequest.class.getSimpleName(), + request.remoteAddress(), + authzInfo + ); + }); listener.onResponse(null); }, listener::onFailure
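The net effect of the rewrite above is to coalesce what used to be one audit call per bulk item into a single explicitIndexAccessEvent per action and outcome, which is also why the AuditTrail signature changed from String to String[] for indices. A small sketch of the grouping step (the AuditBatcher and Item types are hypothetical, standing in for the real audit plumbing):

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

final class AuditBatcher {
    record Item(String action, String index, boolean granted) {}

    // Group resolved indices per action, keeping granted and denied outcomes apart,
    // so one audit event can be emitted per (action, outcome) pair.
    static Map<String, Set<String>> byAction(List<Item> items, boolean granted) {
        Map<String, Set<String>> grouped = new HashMap<>(4);
        for (Item item : items) {
            if (item.granted() == granted) {
                grouped.computeIfAbsent(item.action(), a -> new HashSet<>(4)).add(item.index());
            }
        }
        return grouped;
    }

    public static void main(String[] args) {
        List<Item> items = List.of(
            new Item("indices:data/write/bulk[s]", "logs-000001", true),
            new Item("indices:data/write/bulk[s]", "logs-000002", true),
            new Item("indices:data/write/bulk[s]", "restricted-index", false)
        );
        // One event per action instead of one per item:
        byAction(items, true).forEach((action, indices) -> System.out.println("GRANTED " + action + " -> " + indices));
        byAction(items, false).forEach((action, indices) -> System.out.println("DENIED  " + action + " -> " + indices));
    }
}
```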
-                                requestId,
-                                AuditLevel.ACCESS_GRANTED,
-                                authentication,
-                                itemAction,
-                                resolvedIndex,
-                                item.getClass().getSimpleName(),
-                                request.remoteAddress(),
-                                authzInfo
-                            );
                         }
                     }
+                    actionToDeniedIndicesMap.forEach((action, resolvedIndicesSet) -> {
+                        auditTrail.explicitIndexAccessEvent(
+                            requestId,
+                            AuditLevel.ACCESS_DENIED,
+                            authentication,
+                            action,
+                            resolvedIndicesSet.toArray(new String[0]),
+                            BulkItemRequest.class.getSimpleName(),
+                            request.remoteAddress(),
+                            authzInfo
+                        );
+                    });
+                    actionToGrantedIndicesMap.forEach((action, resolvedIndicesSet) -> {
+                        auditTrail.explicitIndexAccessEvent(
+                            requestId,
+                            AuditLevel.ACCESS_GRANTED,
+                            authentication,
+                            action,
+                            resolvedIndicesSet.toArray(new String[0]),
+                            BulkItemRequest.class.getSimpleName(),
+                            request.remoteAddress(),
+                            authzInfo
+                        );
+                    });
                     listener.onResponse(null);
                 },
                 listener::onFailure
@@ -876,6 +865,30 @@ private void authorizeBulkItems(
         }, listener::onFailure));
     }

+    private static Set<String> addToOrCreateSet(Set<String> set, String item) {
+        final Set<String> localSet = set != null ? set : new HashSet<>(4);
+        localSet.add(item);
+        return localSet;
+    }
+
+    private static String resolveIndexNameDateMath(BulkItemRequest bulkItemRequest) {
+        final ResolvedIndices resolvedIndices = IndicesAndAliasesResolver.resolveIndicesAndAliasesWithoutWildcards(
+            getAction(bulkItemRequest),
+            bulkItemRequest.request()
+        );
+        if (resolvedIndices.getRemote().size() != 0) {
+            throw illegalArgument(
+                "Bulk item should not write to remote indices, but request writes to " + String.join(",", resolvedIndices.getRemote())
+            );
+        }
+        if (resolvedIndices.getLocal().size() != 1) {
+            throw illegalArgument(
+                "Bulk item should write to exactly 1 index, but request writes to " + String.join(",", resolvedIndices.getLocal())
+            );
+        }
+        return resolvedIndices.getLocal().get(0);
+    }
+
     private static IllegalArgumentException illegalArgument(String message) {
         assert false : message;
         return new IllegalArgumentException(message);
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGenerator.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGenerator.java
index a85c0496bfe08..1e874dead9956 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGenerator.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/enrollment/InternalEnrollmentTokenGenerator.java
@@ -38,7 +38,6 @@
 import java.util.List;
 import java.util.function.Consumer;

-import static org.elasticsearch.threadpool.ThreadPool.Names.GENERIC;
 import static org.elasticsearch.xpack.core.ClientHelper.SECURITY_ORIGIN;
 import static org.elasticsearch.xpack.core.XPackSettings.ENROLLMENT_ENABLED;

@@ -76,7 +75,8 @@ public void maybeCreateNodeEnrollmentToken(Consumer<String> consumer, Iterator<TimeValue> backoff)
-            client.threadPool().schedule(() -> maybeCreateNodeEnrollmentToken(consumer, backoff), backoff.next(), GENERIC);
+            client.threadPool()
+                .schedule(() -> maybeCreateNodeEnrollmentToken(consumer, backoff), backoff.next(), client.threadPool().generic());
         } else {
             LOGGER.warn("Unable to get local node's HTTP/transport info after all retries.");
             consumer.accept(null);
@@ -141,7 +141,8 @@ public void createKibanaEnrollmentToken(Consumer<EnrollmentToken> consumer, Iterator<TimeValue> backoff)
         if (null == httpInfo) {
             if (backoff.hasNext()) {
                 LOGGER.info("Local node's HTTP info is not yet available, will retry...");
-                client.threadPool().schedule(() -> createKibanaEnrollmentToken(consumer, backoff), backoff.next(), GENERIC);
+                client.threadPool()
+                    .schedule(() -> createKibanaEnrollmentToken(consumer, backoff), backoff.next(), client.threadPool().generic());
             } else {
                 LOGGER.warn("Unable to get local node's HTTP info after all retries.");
                 consumer.accept(null);
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/FileOperatorUsersStore.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/FileOperatorUsersStore.java
index bc23e6a1f37d7..61dc638e1d55d 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/FileOperatorUsersStore.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/operator/FileOperatorUsersStore.java
@@ -29,6 +29,7 @@
 import org.elasticsearch.xpack.core.security.authc.Authentication;
 import org.elasticsearch.xpack.core.security.authc.esnative.NativeRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.file.FileRealmSettings;
+import org.elasticsearch.xpack.core.security.authc.jwt.JwtRealmSettings;
 import org.elasticsearch.xpack.core.security.authc.service.ServiceAccountSettings;
 import org.elasticsearch.xpack.security.authc.esnative.ReservedRealm;

@@ -192,8 +193,10 @@ private void validate() {
             }
         }
         if (authenticationType == Authentication.AuthenticationType.REALM) {
-            if (false == FileRealmSettings.TYPE.equals(realmType)) {
-                validationException.addValidationError("[realm_type] requires [file] when [auth_type] is [realm] or not specified");
+            if (false == FileRealmSettings.TYPE.equals(realmType) && false == JwtRealmSettings.TYPE.equals(realmType)) {
+                validationException.addValidationError(
+                    "when [auth_type] is defined as [realm] then [realm_type] must be defined as [file] or [jwt]"
+                );
             }
             if (tokenNames != null) {
                 validationException.addValidationError("[token_names] is not valid when [realm_type] is [file]");
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java
index 0544a82448bcc..7be1d0f96c043 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/profile/ProfileService.java
@@ -744,7 +744,7 @@ void getOrCreateProfileWithBackoff(
                 .schedule(
                     () -> getOrCreateProfileWithBackoff(subject, profileDocument, backoff, listener),
                     backoffTimeValue,
-                    ThreadPool.Names.GENERIC
+                    client.threadPool().generic()
                 );
         } else {
             // Retry has depleted.
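(Aside: the enrollment-token and ProfileService hunks above share one retry-with-backoff idiom, and all of them now pass an Executor such as client.threadPool().generic() to ThreadPool#schedule instead of a thread-pool name, matching the interceptor change below. A condensed, self-contained sketch of the idiom using only the JDK — names are illustrative, not the production code:

import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Consumer;

public class BackoffRetrySketch {
    static final ScheduledExecutorService SCHEDULER = Executors.newSingleThreadScheduledExecutor();
    static int attempts = 0;

    static String fetchOnce() {
        return ++attempts < 3 ? null : "http-info"; // pretend the info becomes available on the third attempt
    }

    static void fetchWithBackoff(Iterator<Long> backoffMillis, Consumer<String> consumer) {
        final String result = fetchOnce();
        if (result != null) {
            consumer.accept(result);
        } else if (backoffMillis.hasNext()) {
            // schedule the retry, analogous to threadPool.schedule(runnable, delay, executor)
            SCHEDULER.schedule(() -> fetchWithBackoff(backoffMillis, consumer), backoffMillis.next(), TimeUnit.MILLISECONDS);
        } else {
            consumer.accept(null); // retries depleted; the caller must handle the absence
        }
    }

    public static void main(String[] args) {
        fetchWithBackoff(List.of(50L, 100L, 200L).iterator(), result -> {
            System.out.println("got: " + result);
            SCHEDULER.shutdown();
        });
    }
}

The ProfileService comment resumes below.)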
            // This can only happen when the document or the profile index itself gets deleted
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java
index 98fc795f36ca5..123814ec38c6f 100644
--- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java
+++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptor.java
@@ -16,6 +16,7 @@
 import org.elasticsearch.common.ssl.SslConfiguration;
 import org.elasticsearch.common.util.Maps;
 import org.elasticsearch.common.util.concurrent.AbstractRunnable;
+import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.common.util.concurrent.RunOnce;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.license.LicenseUtils;
@@ -55,6 +56,7 @@
 import java.util.Collections;
 import java.util.Map;
 import java.util.Optional;
+import java.util.concurrent.Executor;
 import java.util.function.Function;

 import static org.elasticsearch.core.Strings.format;
@@ -458,7 +460,7 @@ void assertNoAuthentication(String action) {
     @Override
     public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(
         String action,
-        String executor,
+        Executor executor,
         boolean forceExecution,
         TransportRequestHandler<T> actualHandler
     ) {
@@ -515,7 +517,7 @@ public static class ProfileSecuredRequestHandler<T extends TransportRequest> implements TransportRequestHandler<T>
         private final TransportRequestHandler<T> handler;
         private final Map<String, ServerTransportFilter> profileFilters;
         private final ThreadContext threadContext;
-        private final String executorName;
+        private final Executor executor;
         private final ThreadPool threadPool;
         private final boolean forceExecution;
         private final Logger logger;
@@ -524,14 +526,14 @@ public static class ProfileSecuredRequestHandler<T extends TransportRequest> implements TransportRequestHandler<T>
             Logger logger,
             String action,
             boolean forceExecution,
-            String executorName,
+            Executor executor,
             TransportRequestHandler<T> handler,
             Map<String, ServerTransportFilter> profileFilters,
             ThreadPool threadPool
         ) {
             this.logger = logger;
             this.action = action;
-            this.executorName = executorName;
+            this.executor = executor;
             this.handler = handler;
             this.profileFilters = profileFilters;
             this.threadContext = threadPool.getThreadContext();
@@ -572,16 +574,7 @@ public void onAfter() {
         @Override
         public String toString() {
-            return "ProfileSecuredRequestHandler{"
-                + "action='"
-                + action
-                + '\''
-                + ", executorName='"
-                + executorName
-                + '\''
-                + ", forceExecution="
-                + forceExecution
-                + '}';
+            return "ProfileSecuredRequestHandler{" + "action='" + action + '\'' + ", forceExecution=" + forceExecution + '}';
         }

         @Override
@@ -602,7 +595,7 @@ public void messageReceived(T request, TransportChannel channel, Task task) {
             final AbstractRunnable receiveMessage = getReceiveRunnable(request, channel, task);
             final ActionListener<Void> filterListener;
-            if (ThreadPool.Names.SAME.equals(executorName)) {
+            if (executor == EsExecutors.DIRECT_EXECUTOR_SERVICE) {
                 filterListener = new AbstractFilterListener(receiveMessage) {
                     @Override
                     public void onResponse(Void unused) {
@@ -625,7 +618,7 @@ public void onResponse(Void unused) {
                 receiveMessage.run();
             } else {
                 try {
-                    threadPool.executor(executorName).execute(receiveMessage);
+                    executor.execute(receiveMessage);
                 } catch (Exception e) {
                     onFailure(e);
                 }
diff --git a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4ServerTransport.java
b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4ServerTransport.java index ffd7437eab2e5..f930f6dbe04f2 100644 --- a/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4ServerTransport.java +++ b/x-pack/plugin/security/src/main/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4ServerTransport.java @@ -21,6 +21,7 @@ import org.elasticsearch.transport.netty4.SharedGroupFactory; import org.elasticsearch.xpack.core.security.transport.netty4.SecurityNetty4Transport; import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.CrossClusterAccessAuthenticationService; import org.elasticsearch.xpack.security.transport.filter.IPFilter; public class SecurityNetty4ServerTransport extends SecurityNetty4Transport { @@ -38,7 +39,8 @@ public SecurityNetty4ServerTransport( final CircuitBreakerService circuitBreakerService, @Nullable final IPFilter authenticator, final SSLService sslService, - final SharedGroupFactory sharedGroupFactory + final SharedGroupFactory sharedGroupFactory, + final CrossClusterAccessAuthenticationService crossClusterAccessAuthenticationService ) { super( settings, @@ -49,7 +51,8 @@ public SecurityNetty4ServerTransport( namedWriteableRegistry, circuitBreakerService, sslService, - sharedGroupFactory + sharedGroupFactory, + crossClusterAccessAuthenticationService ); this.authenticator = authenticator; } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java index 3abb40d23cf8a..07c858f10f447 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityContextTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.security; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.settings.Settings; @@ -190,7 +191,7 @@ public void testExecuteAfterRewritingAuthenticationWillConditionallyRewriteNewAp final Authentication original = AuthenticationTestHelper.builder() .apiKey() .metadata(metadata) - .transportVersion(TransportVersion.V_8_0_0) + .transportVersion(TransportVersions.V_8_0_0) .build(); original.writeToContext(threadContext); @@ -205,7 +206,7 @@ public void testExecuteAfterRewritingAuthenticationWillConditionallyRewriteNewAp Map.of("limitedBy role", Map.of("cluster", List.of("all"))), authentication.getAuthenticatingSubject().getMetadata().get(AuthenticationField.API_KEY_LIMITED_ROLE_DESCRIPTORS_KEY) ); - }, TransportVersion.V_7_8_0); + }, TransportVersions.V_7_8_0); // If target is new node, no need to rewrite the new style API key metadata securityContext.executeAfterRewritingAuthentication(originalCtx -> { @@ -215,7 +216,7 @@ public void testExecuteAfterRewritingAuthenticationWillConditionallyRewriteNewAp } public void testExecuteAfterRewritingAuthenticationWillConditionallyRewriteOldApiKeyMetadata() throws IOException { - final Authentication original = AuthenticationTestHelper.builder().apiKey().transportVersion(TransportVersion.V_7_8_0).build(); + final Authentication original = 
AuthenticationTestHelper.builder().apiKey().transportVersion(TransportVersions.V_7_8_0).build(); // original authentication has the old style of role descriptor maps assertThat( @@ -233,7 +234,7 @@ public void testExecuteAfterRewritingAuthenticationWillConditionallyRewriteOldAp securityContext.executeAfterRewritingAuthentication(originalCtx -> { Authentication authentication = securityContext.getAuthentication(); assertSame(original.getAuthenticatingSubject().getMetadata(), authentication.getAuthenticatingSubject().getMetadata()); - }, TransportVersion.V_7_8_0); + }, TransportVersions.V_7_8_0); // If target is new node, ensure old map style API key metadata is rewritten to bytesreference securityContext.executeAfterRewritingAuthentication(originalCtx -> { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java index 476581132c30d..00f478f68b6ba 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/action/user/TransportGetUsersActionTests.java @@ -83,6 +83,7 @@ public class TransportGetUsersActionTests extends ESTestCase { private boolean hasAnonymousProfile; private boolean hasReservedProfile; private boolean hasNativeProfile; + private boolean profileIndexExists; @Before public void maybeEnableAnonymous() { @@ -96,6 +97,7 @@ public void maybeEnableAnonymous() { hasAnonymousProfile = randomBoolean(); hasReservedProfile = randomBoolean(); hasNativeProfile = randomBoolean(); + profileIndexExists = randomBoolean(); } @After @@ -163,7 +165,7 @@ public void onFailure(Exception e) { } else { assertThat("expected an empty array but got: " + Arrays.toString(users), users, emptyArray()); } - if (withProfileUid) { + if (profileIndexExists && withProfileUid) { assertThat( responseRef.get().getProfileUidLookup(), equalTo( @@ -248,7 +250,7 @@ public void onFailure(Exception e) { assertThat(throwableRef.get(), is(nullValue())); assertThat(responseRef.get(), is(notNullValue())); assertThat(users, arrayContaining(reservedUsers.toArray(new User[reservedUsers.size()]))); - if (withProfileUid) { + if (profileIndexExists && withProfileUid) { assertThat(responseRef.get().getProfileUidLookup(), equalTo(reservedUsers.stream().filter(user -> { if (user instanceof AnonymousUser) { return hasAnonymousProfile; @@ -340,7 +342,7 @@ public void onFailure(Exception e) { assertThat(throwableRef.get(), is(nullValue())); assertThat(responseRef.get(), is(notNullValue())); assertThat(responseRef.get().users(), arrayContaining(expectedList.toArray(new User[expectedList.size()]))); - if (withProfileUid) { + if (profileIndexExists && withProfileUid) { assertThat(responseRef.get().getProfileUidLookup(), equalTo(expectedList.stream().filter(user -> { if (user instanceof AnonymousUser) { return hasAnonymousProfile; @@ -399,6 +401,7 @@ public void testGetUsersWithProfileUidException() { null, Collections.emptySet() ); + profileIndexExists = true; // profile index must exist to simulate exception on search TransportGetUsersAction action = new TransportGetUsersAction( Settings.EMPTY, mock(ActionFilters.class), @@ -496,7 +499,7 @@ public void onFailure(Exception e) { assertThat(throwableRef.get(), is(nullValue())); assertThat(responseRef.get(), is(notNullValue())); 
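(Aside: throughout these TransportGetUsersActionTests hunks the profile-uid assertions gain a profileIndexExists guard, because the mocked ProfileService — see mockProfileService further down — now answers the subjects-to-uid lookup with a null result when no profile index exists, so no uid map can be expected in that case. A minimal stand-in for that conditional-answer shape, with hypothetical types and JDK-only code:

import java.util.List;
import java.util.Map;
import java.util.function.Consumer;
import java.util.stream.Collectors;

class ProfileUidLookupSketch {
    static void lookupProfileUids(boolean profileIndexExists, List<String> usernames, Consumer<Map<String, String>> listener) {
        if (profileIndexExists == false) {
            listener.accept(null); // no profile index -> no uid lookup result at all
            return;
        }
        // otherwise every user resolves to a uid (abridged; the real mock filters per-user profile flags)
        listener.accept(usernames.stream().collect(Collectors.toMap(name -> name, name -> "u_" + name)));
    }
}

Callers then assert on the uid map only when both flags hold, mirroring the if (profileIndexExists && withProfileUid) guards in the hunks around this point.)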
assertThat(responseRef.get().users(), arrayContaining(expectedList.toArray(new User[expectedList.size()]))); - if (withProfileUid) { + if (profileIndexExists && withProfileUid) { assertThat( responseRef.get().getProfileUidLookup(), equalTo( @@ -611,8 +614,12 @@ private ProfileService mockProfileService() { private ProfileService mockProfileService(boolean randomException) { final ProfileService profileService = mock(ProfileService.class); doAnswer(invocation -> { - final List subjects = (List) invocation.getArguments()[0]; final var listener = (ActionListener>) invocation.getArguments()[1]; + if (false == profileIndexExists) { + listener.onResponse(null); + return null; + } + final List subjects = (List) invocation.getArguments()[0]; List> results = subjects.stream().map(subject -> { final User user = subject.getUser(); if (user instanceof AnonymousUser) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java index 58ccd75f91b14..9ec2e8be383b6 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/audit/logfile/LoggingAuditTrailTests.java @@ -2027,7 +2027,7 @@ public void testSystemAccessGranted() throws Exception { randomFrom(AuditLevel.ACCESS_GRANTED, AuditLevel.SYSTEM_ACCESS_GRANTED), authentication, "_action", - randomFrom(randomAlphaOfLengthBetween(1, 4), null), + new String[] { randomAlphaOfLengthBetween(1, 4) }, BulkItemRequest.class.getName(), request.remoteAddress(), authorizationInfo @@ -2059,13 +2059,13 @@ public void testSystemAccessGranted() throws Exception { assertMsg(logger, checkedFields, checkedArrayFields); clearLog(); - String index = randomFrom(randomAlphaOfLengthBetween(1, 4), null); + String[] indices = randomArray(0, 4, String[]::new, () -> randomBoolean() ? 
null : randomAlphaOfLengthBetween(1, 4)); auditTrail.explicitIndexAccessEvent( requestId, randomFrom(AuditLevel.ACCESS_GRANTED, AuditLevel.SYSTEM_ACCESS_GRANTED), authentication, "_action", - index, + indices, BulkItemRequest.class.getName(), request.remoteAddress(), authorizationInfo @@ -2084,9 +2084,7 @@ public void testSystemAccessGranted() throws Exception { opaqueId(threadContext, checkedFields); traceId(threadContext, checkedFields); forwardedFor(threadContext, checkedFields); - if (index != null) { - checkedArrayFields.put(LoggingAuditTrail.INDICES_FIELD_NAME, new String[] { index }); - } + checkedArrayFields.put(LoggingAuditTrail.INDICES_FIELD_NAME, indices); assertMsg(logger, checkedFields, checkedArrayFields); } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java index ac80c7b5bbb64..117d1f1fe14bb 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/ApiKeyServiceTests.java @@ -13,6 +13,7 @@ import org.apache.lucene.search.TotalHits; import org.elasticsearch.ElasticsearchException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; @@ -2341,7 +2342,7 @@ public void testMaybeRemoveRemoteIndicesPrivilegesWithUnsupportedVersion() { // Selecting random unsupported version. final TransportVersion minTransportVersion = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.MINIMUM_COMPATIBLE, + TransportVersions.MINIMUM_COMPATIBLE, TransportVersionUtils.getPreviousVersion(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) ); @@ -2431,7 +2432,7 @@ public void testCreateCrossClusterApiKeyMinVersionConstraint() { when(clusterService.state()).thenReturn(clusterState); final TransportVersion minTransportVersion = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.MINIMUM_COMPATIBLE, + TransportVersions.MINIMUM_COMPATIBLE, TransportVersionUtils.getPreviousVersion(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) ); when(clusterState.getMinTransportVersion()).thenReturn(minTransportVersion); @@ -2566,7 +2567,7 @@ public void testCreateOrUpdateApiKeyWithWorkflowsRestrictionForUnsupportedVersio when(clusterService.state()).thenReturn(clusterState); final TransportVersion minTransportVersion = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.MINIMUM_COMPATIBLE, + TransportVersions.MINIMUM_COMPATIBLE, TransportVersionUtils.getPreviousVersion(WORKFLOWS_RESTRICTION_VERSION) ); when(clusterState.getMinTransportVersion()).thenReturn(minTransportVersion); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java index ed111f595483d..0b2243a77f21e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/CrossClusterAccessAuthenticationServiceTests.java @@ -9,6 +9,7 @@ import 
org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.service.ClusterService; @@ -69,7 +70,7 @@ public void testAuthenticateThrowsOnUnsupportedMinVersions() throws IOException clusterService = mockClusterServiceWithMinTransportVersion( TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.MINIMUM_COMPATIBLE, + TransportVersions.MINIMUM_COMPATIBLE, TransportVersionUtils.getPreviousVersion(TRANSPORT_VERSION_ADVANCED_REMOTE_CLUSTER_SECURITY) ) ); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java index 01e9c7587f8db..5faeb02f7029f 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/TokenServiceTests.java @@ -9,6 +9,7 @@ import org.elasticsearch.ElasticsearchException; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.DocWriteRequest; @@ -240,10 +241,10 @@ private static DiscoveryNode addAnother7071DataNode(ClusterService clusterServic TransportVersion transportVersion; if (randomBoolean()) { version = Version.V_7_0_0; - transportVersion = TransportVersion.V_7_0_0; + transportVersion = TransportVersions.V_7_0_0; } else { version = Version.V_7_1_0; - transportVersion = TransportVersion.V_7_1_0; + transportVersion = TransportVersions.V_7_1_0; } return addAnotherDataNodeWithVersion(clusterService, version, transportVersion); } @@ -253,10 +254,10 @@ private static DiscoveryNode addAnotherPre8500DataNode(ClusterService clusterSer TransportVersion transportVersion; if (randomBoolean()) { version = Version.V_8_8_1; - transportVersion = TransportVersion.V_8_8_1; + transportVersion = TransportVersions.V_8_8_1; } else { version = Version.V_8_9_0; - transportVersion = TransportVersion.V_8_500_015; + transportVersion = TransportVersions.V_8_500_020; } return addAnotherDataNodeWithVersion(clusterService, version, transportVersion); } @@ -824,8 +825,8 @@ public void testNonExistingPre72Token() throws Exception { storeTokenHeader( requestContext, tokenService.prependVersionAndEncodeAccessToken( - TransportVersion.V_7_1_0, - tokenService.getRandomTokenBytes(TransportVersion.V_7_1_0, randomBoolean()).v1() + TransportVersions.V_7_1_0, + tokenService.getRandomTokenBytes(TransportVersions.V_7_1_0, randomBoolean()).v1() ) ); @@ -846,7 +847,7 @@ public void testNonExistingUUIDToken() throws Exception { .build(false); mockGetTokenFromAccessTokenBytes(tokenService, tokenService.getRandomTokenBytes(randomBoolean()).v1(), authentication, false, null); ThreadContext requestContext = new ThreadContext(Settings.EMPTY); - TransportVersion uuidTokenVersion = randomFrom(TransportVersion.V_7_2_0, TransportVersion.V_7_3_2); + TransportVersion uuidTokenVersion = randomFrom(TransportVersions.V_7_2_0, TransportVersions.V_7_3_2); storeTokenHeader( requestContext, tokenService.prependVersionAndEncodeAccessToken( diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java index 51c6b677b56b0..4cb510e9345f6 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/jwt/JwtRealmAuthenticateTests.java @@ -15,6 +15,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.Strings; +import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.settings.MockSecureSettings; import org.elasticsearch.common.settings.SecureString; import org.elasticsearch.common.settings.Settings; @@ -32,6 +33,8 @@ import java.util.Collections; import java.util.Date; import java.util.List; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -530,4 +533,82 @@ public void testSameIssuerTwoRealmsDifferentClientSecrets() throws Exception { final int jwtAuthcCount = randomIntBetween(2, 3); doMultipleAuthcAuthzAndVerifySuccess(jwtIssuerAndRealm.realm(), user, jwt, clientSecret, jwtAuthcCount); } + + public void testConcurrentPutAndInvalidateCacheWorks() throws Exception { + jwtIssuerAndRealms = generateJwtIssuerRealmPairs( + randomIntBetween(1, 1), // realmsRange + randomIntBetween(0, 0), // authzRange + randomIntBetween(1, JwtRealmSettings.SUPPORTED_SIGNATURE_ALGORITHMS.size()), // algsRange + randomIntBetween(1, 1), // audiencesRange + randomIntBetween(1, 1), // usersRange + randomIntBetween(1, 1), // rolesRange + randomIntBetween(1, 1), // jwtCacheSizeRange set to 1 for constant eviction that is necessary to trigger the locking when put + false // createHttpsServer + ); + + final JwtIssuerAndRealm jwtIssuerAndRealm = randomJwtIssuerRealmPair(); + final User user = randomUser(jwtIssuerAndRealm.issuer()); + final SecureString jwt = randomJwt(jwtIssuerAndRealm, user); + final SignedJWT parsedJwt = SignedJWT.parse(jwt.toString()); + final JWTClaimsSet validClaimsSet = parsedJwt.getJWTClaimsSet(); + + final int processors = Runtime.getRuntime().availableProcessors(); + final int numberOfThreads = Math.min(50, scaledRandomIntBetween((processors + 1) / 2, 4 * processors)); // up to 50 threads + final Thread[] threads = new Thread[numberOfThreads]; + final CountDownLatch threadsCountDown = new CountDownLatch(numberOfThreads); + final CountDownLatch racingCountDown = new CountDownLatch(1); + final CountDownLatch completionCountDown = new CountDownLatch(numberOfThreads); + + for (int i = 0; i < numberOfThreads; i++) { + if (randomBoolean()) { + threads[i] = new Thread(() -> { + threadsCountDown.countDown(); + try { + if (racingCountDown.await(10, TimeUnit.SECONDS)) { + jwtIssuerAndRealm.realm().expireAll(); + completionCountDown.countDown(); + } else { + throw new AssertionError("racing is not ready within the given time period"); + } + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + } else { + threads[i] = new Thread(() -> { + final BytesArray jwtCacheKey = new BytesArray(randomAlphaOfLength(10)); + final PlainActionFuture> future = new PlainActionFuture<>(); + threadsCountDown.countDown(); + try { + if (racingCountDown.await(10, TimeUnit.SECONDS)) { + for (int j = 0; j < 10; j++) { + 
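(Aside: the loop around the processValidatedJwt calls below hammers the JWT cache while sibling threads call expireAll; the three latches above implement a start-gate so that every thread enters its racing section at essentially the same instant. A generic, self-contained sketch of that latch idiom, plain JDK rather than the test's code:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;

public class StartGateSketch {
    public static void main(String[] args) throws InterruptedException {
        final int threadCount = 8;
        final CountDownLatch ready = new CountDownLatch(threadCount);
        final CountDownLatch go = new CountDownLatch(1);
        final CountDownLatch done = new CountDownLatch(threadCount);
        for (int i = 0; i < threadCount; i++) {
            new Thread(() -> {
                ready.countDown();               // announce readiness
                try {
                    if (go.await(10, TimeUnit.SECONDS) == false) {
                        throw new AssertionError("start gate never opened");
                    }
                    // ... racing section goes here (e.g. cache put vs. invalidate) ...
                    done.countDown();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            }).start();
        }
        ready.await();   // wait until every thread is parked at the gate
        go.countDown();  // open the gate: all threads race from here
        if (done.await(30, TimeUnit.SECONDS) == false) {
            throw new AssertionError("workers did not finish; possible deadlock");
        }
    }
}

The test resumes below.)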
jwtIssuerAndRealm.realm().processValidatedJwt("token-principal", jwtCacheKey, validClaimsSet, future); + assertThat(future.actionGet().getValue().principal(), equalTo(user.principal())); + } + completionCountDown.countDown(); + } else { + throw new AssertionError("Racing is not ready within the given time period"); + } + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + } + threads[i].start(); + } + + if (threadsCountDown.await(10, TimeUnit.SECONDS)) { + racingCountDown.countDown(); + } else { + throw new AssertionError("Threads are not ready within the given time period"); + } + + if (false == completionCountDown.await(30, TimeUnit.SECONDS)) { + throw new AssertionError("Test is not completed in time, check whether threads had deadlock"); + } + + for (Thread thread : threads) { + thread.join(); + } + } } diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java index 56bb40e65f5a1..6b675b61c2a6d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authc/support/mapper/ExpressionRoleMappingTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.security.authc.support.mapper; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.bytes.BytesArray; @@ -407,7 +408,7 @@ public void testToXContentWithTemplates() throws Exception { public void testSerialization() throws Exception { final ExpressionRoleMapping original = randomRoleMapping(true); - TransportVersion version = TransportVersionUtils.randomVersionBetween(random(), TransportVersion.V_7_2_0, null); + TransportVersion version = TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_7_2_0, null); BytesStreamOutput output = new BytesStreamOutput(); output.setTransportVersion(version); original.writeTo(output); @@ -425,7 +426,11 @@ public void testSerialization() throws Exception { public void testSerializationPreV71() throws Exception { final ExpressionRoleMapping original = randomRoleMapping(false); - TransportVersion version = TransportVersionUtils.randomVersionBetween(random(), TransportVersion.V_7_0_0, TransportVersion.V_7_0_1); + TransportVersion version = TransportVersionUtils.randomVersionBetween( + random(), + TransportVersions.V_7_0_0, + TransportVersions.V_7_0_1 + ); BytesStreamOutput output = new BytesStreamOutput(); output.setTransportVersion(version); original.writeTo(output); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java index 54baec95d713a..edff46cef16e4 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/AuthorizationServiceTests.java @@ -189,9 +189,11 @@ import java.util.UUID; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import 
java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Predicate; +import java.util.function.Supplier; import static java.util.Arrays.asList; import static org.elasticsearch.test.ActionListenerUtils.anyActionListener; @@ -224,6 +226,7 @@ import static org.hamcrest.Matchers.sameInstance; import static org.hamcrest.Matchers.startsWith; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; @@ -2404,7 +2407,396 @@ public void testCompositeActionsIndicesAreCheckedAtTheShardLevel() { assertThrowsAuthorizationException(() -> authorize(createAuthentication(userDenied), action, request), action, "userDenied"); } - public void testAuthorizationOfIndividualBulkItems() { + public void testAuthorizationOfSingleActionMultipleIndicesBulkItems() { + final String action = BulkAction.NAME + "[s]"; + final BulkItemRequest[] items; + final DocWriteRequest.OpType opType = randomFrom(DocWriteRequest.OpType.values()); + // the "good role" authorizes all the bulk items + final RoleDescriptor goodRole; + // the "bad role" does not authorize any bulk items + final RoleDescriptor badRole; + final AtomicInteger idCounter = new AtomicInteger(); + final Set allIndexNames = new HashSet<>(); + final Supplier indexNameSupplier = () -> { + String indexName = randomAlphaOfLengthBetween(1, 4); + allIndexNames.add(indexName); + return indexName; + }; + // build a request with bulk items of the same action type, but multiple index names + switch (opType) { + case INDEX -> { + items = randomArray( + 1, + 8, + BulkItemRequest[]::new, + () -> new BulkItemRequest( + idCounter.get(), + new IndexRequest(indexNameSupplier.get()).id("id" + idCounter.incrementAndGet()) + .opType(DocWriteRequest.OpType.INDEX) + ) + ); + goodRole = new RoleDescriptor( + "good-role", + null, + allIndexNames.stream() + .map( + indexName -> IndicesPrivileges.builder() + .indices(indexName) + .privileges(randomFrom("all", "create", "index", "write")) + .build() + ) + .toArray(IndicesPrivileges[]::new), + null + ); + badRole = new RoleDescriptor( + "bad-role", + null, + allIndexNames.stream() + .map( + indexName -> IndicesPrivileges.builder() + .indices(indexName) + .privileges(randomFrom("create_doc", "delete")) + .build() + ) + .toArray(IndicesPrivileges[]::new), + null + ); + } + case CREATE -> { + items = randomArray( + 1, + 8, + BulkItemRequest[]::new, + () -> new BulkItemRequest( + idCounter.get(), + new IndexRequest(indexNameSupplier.get()).id("id" + idCounter.incrementAndGet()) + .opType(DocWriteRequest.OpType.CREATE) + ) + ); + goodRole = new RoleDescriptor( + "good-role", + null, + allIndexNames.stream() + .map( + indexName -> IndicesPrivileges.builder() + .indices(indexName) + .privileges(randomFrom("all", "create_doc", "create", "index", "write")) + .build() + ) + .toArray(IndicesPrivileges[]::new), + null + ); + badRole = new RoleDescriptor( + "bad-role", + null, + allIndexNames.stream() + .map(indexName -> IndicesPrivileges.builder().indices(indexName).privileges("delete").build()) + .toArray(IndicesPrivileges[]::new), + null + ); + } + case DELETE -> { + items = randomArray( + 1, + 8, + BulkItemRequest[]::new, + () -> new BulkItemRequest( + idCounter.get(), + new DeleteRequest(indexNameSupplier.get(), "id" + idCounter.incrementAndGet()) + ) + ); + goodRole = new RoleDescriptor( + "good-role", + null, + allIndexNames.stream() + .map( 
+ indexName -> IndicesPrivileges.builder() + .indices(indexName) + .privileges(randomFrom("all", "delete", "write")) + .build() + ) + .toArray(IndicesPrivileges[]::new), + null + ); + badRole = new RoleDescriptor( + "bad-role", + null, + allIndexNames.stream() + .map( + indexName -> IndicesPrivileges.builder() + .indices(indexName) + .privileges(randomFrom("index", "create", "create_doc")) + .build() + ) + .toArray(IndicesPrivileges[]::new), + null + ); + } + case UPDATE -> { + items = randomArray( + 1, + 8, + BulkItemRequest[]::new, + () -> new BulkItemRequest( + idCounter.get(), + new UpdateRequest(indexNameSupplier.get(), "id" + idCounter.incrementAndGet()) + ) + ); + goodRole = new RoleDescriptor( + "good-role", + null, + allIndexNames.stream() + .map( + indexName -> IndicesPrivileges.builder() + .indices(indexName) + .privileges(randomFrom("all", "index", "write")) + .build() + ) + .toArray(IndicesPrivileges[]::new), + null + ); + badRole = new RoleDescriptor( + "bad-role", + null, + allIndexNames.stream() + .map( + indexName -> IndicesPrivileges.builder() + .indices(indexName) + .privileges(randomFrom("create", "create_doc", "delete")) + .build() + ) + .toArray(IndicesPrivileges[]::new), + null + ); + } + default -> throw new IllegalStateException("Unexpected value: " + opType); + } + roleMap.put("good-role", goodRole); + roleMap.put("bad-role", badRole); + + final ShardId shardId = new ShardId("some-concrete-shard-index-name", UUID.randomUUID().toString(), 1); + final BulkShardRequest request = new BulkShardRequest(shardId, randomFrom(WriteRequest.RefreshPolicy.values()), items); + + mockEmptyMetadata(); + final Authentication authentication; + final String requestId; + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + authentication = createAuthentication(new User("user", "good-role")); + requestId = AuditUtil.getOrGenerateRequestId(threadContext); + authorize(authentication, action, request); + } + + // bulk shard request is authorized + verify(auditTrail).accessGranted( + eq(requestId), + eq(authentication), + eq(action), + eq(request), + authzInfoRoles(new String[] { goodRole.getName() }) + ); + // there's only one "access granted" record for all the bulk items + verify(auditTrail).explicitIndexAccessEvent(eq(requestId), eq(AuditLevel.ACCESS_GRANTED), eq(authentication), eq(switch (opType) { + case INDEX -> IndexAction.NAME + ":op_type/index"; + case CREATE -> IndexAction.NAME + ":op_type/create"; + case UPDATE -> UpdateAction.NAME; + case DELETE -> DeleteAction.NAME; + }), + argThat( + indicesArrays -> indicesArrays.length == allIndexNames.size() && allIndexNames.containsAll(Arrays.asList(indicesArrays)) + ), + eq(BulkItemRequest.class.getSimpleName()), + eq(request.remoteAddress()), + authzInfoRoles(new String[] { goodRole.getName() }) + ); + verifyNoMoreInteractions(auditTrail); + // all bulk items go through as authorized + for (BulkItemRequest bulkItemRequest : request.items()) { + assertThat(bulkItemRequest.getPrimaryResponse(), nullValue()); + } + + final Authentication badAuthentication; + final String badRequestId; + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + badAuthentication = createAuthentication(new User("bad-user", "bad-role")); + badRequestId = AuditUtil.getOrGenerateRequestId(threadContext); + // the bulk shard request is authorized, but the bulk items are not + authorize(badAuthentication, action, request); + } + // bulk shard request is authorized + verify(auditTrail).accessGranted( + eq(badRequestId), 
+ eq(badAuthentication), + eq(action), + eq(request), + authzInfoRoles(new String[] { badRole.getName() }) + ); + // there's only one "access denied" record for all the bulk items + verify(auditTrail).explicitIndexAccessEvent( + eq(badRequestId), + eq(AuditLevel.ACCESS_DENIED), + eq(badAuthentication), + eq(switch (opType) { + case INDEX -> IndexAction.NAME + ":op_type/index"; + case CREATE -> IndexAction.NAME + ":op_type/create"; + case UPDATE -> UpdateAction.NAME; + case DELETE -> DeleteAction.NAME; + }), + argThat( + indicesArrays -> indicesArrays.length == allIndexNames.size() && allIndexNames.containsAll(Arrays.asList(indicesArrays)) + ), + eq(BulkItemRequest.class.getSimpleName()), + eq(request.remoteAddress()), + authzInfoRoles(new String[] { badRole.getName() }) + ); + verifyNoMoreInteractions(auditTrail); + // all bulk items are failures + for (BulkItemRequest bulkItemRequest : request.items()) { + assertThat(bulkItemRequest.getPrimaryResponse().isFailed(), is(true)); + } + } + + public void testAuthorizationOfMultipleActionsSingleIndexBulkItems() { + final String action = BulkAction.NAME + "[s]"; + final AtomicInteger idCounter = new AtomicInteger(); + final Set actionTypes = new HashSet<>(); + final Set deleteItems = new HashSet<>(); + final String indexName = randomAlphaOfLengthBetween(1, 4); + final BulkItemRequest[] items = randomArray(1, 8, BulkItemRequest[]::new, () -> { + switch (randomFrom(DocWriteRequest.OpType.values())) { + case INDEX -> { + actionTypes.add(IndexAction.NAME + ":op_type/index"); + return new BulkItemRequest( + idCounter.get(), + new IndexRequest(indexName).id("id" + idCounter.incrementAndGet()).opType(DocWriteRequest.OpType.INDEX) + ); + } + case CREATE -> { + actionTypes.add(IndexAction.NAME + ":op_type/create"); + return new BulkItemRequest( + idCounter.get(), + new IndexRequest(indexName).id("id" + idCounter.incrementAndGet()).opType(DocWriteRequest.OpType.CREATE) + ); + } + case DELETE -> { + actionTypes.add(DeleteAction.NAME); + deleteItems.add(idCounter.get()); + return new BulkItemRequest(idCounter.get(), new DeleteRequest(indexName, "id" + idCounter.incrementAndGet())); + } + case UPDATE -> { + actionTypes.add(UpdateAction.NAME); + return new BulkItemRequest(idCounter.get(), new UpdateRequest(indexName, "id" + idCounter.incrementAndGet())); + } + default -> throw new IllegalStateException("Unexpected value"); + } + }); + RoleDescriptor allRole = new RoleDescriptor( + "all-role", + null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices(indexName).privileges(randomFrom("all", "write")).build() }, + null + ); + RoleDescriptor indexRole = new RoleDescriptor( + "index-role", + null, + new IndicesPrivileges[] { IndicesPrivileges.builder().indices(indexName).privileges("index").build() }, + null + ); + roleMap.put("all-role", allRole); + roleMap.put("index-role", indexRole); + + final ShardId shardId = new ShardId(indexName, UUID.randomUUID().toString(), 1); + final BulkShardRequest request = new BulkShardRequest(shardId, randomFrom(WriteRequest.RefreshPolicy.values()), items); + + mockEmptyMetadata(); + final Authentication authentication; + final String requestId; + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + authentication = createAuthentication(new User("user", "all-role")); + requestId = AuditUtil.getOrGenerateRequestId(threadContext); + authorize(authentication, action, request); + } + // bulk shard request is authorized + verify(auditTrail).accessGranted( + eq(requestId), + eq(authentication), + 
eq(action), + eq(request), + authzInfoRoles(new String[] { allRole.getName() }) + ); + // there's one granted audit entry for each action type + actionTypes.forEach(actionType -> { + verify(auditTrail).explicitIndexAccessEvent( + eq(requestId), + eq(AuditLevel.ACCESS_GRANTED), + eq(authentication), + eq(actionType), + eq(new String[] { indexName }), + eq(BulkItemRequest.class.getSimpleName()), + eq(request.remoteAddress()), + authzInfoRoles(new String[] { allRole.getName() }) + ); + }); + verifyNoMoreInteractions(auditTrail); + // all bulk items go through as authorized + for (BulkItemRequest bulkItemRequest : request.items()) { + assertThat(bulkItemRequest.getPrimaryResponse(), nullValue()); + } + + // use the "index" role + final Authentication indexAuthentication; + final String indexRequestId; + try (ThreadContext.StoredContext ignore = threadContext.stashContext()) { + indexAuthentication = createAuthentication(new User("index-user", "index-role")); + indexRequestId = AuditUtil.getOrGenerateRequestId(threadContext); + authorize(indexAuthentication, action, request); + } + // bulk shard request is authorized + verify(auditTrail).accessGranted( + eq(indexRequestId), + eq(indexAuthentication), + eq(action), + eq(request), + authzInfoRoles(new String[] { indexRole.getName() }) + ); + // there's a single granted audit entry for each action type, less the delete action (which is denied) + actionTypes.forEach(actionType -> { + if (actionType.equals(DeleteAction.NAME) == false) { + verify(auditTrail).explicitIndexAccessEvent( + eq(indexRequestId), + eq(AuditLevel.ACCESS_GRANTED), + eq(indexAuthentication), + eq(actionType), + eq(new String[] { indexName }), + eq(BulkItemRequest.class.getSimpleName()), + eq(request.remoteAddress()), + authzInfoRoles(new String[] { indexRole.getName() }) + ); + } + }); + if (deleteItems.isEmpty() == false) { + // there's one denied audit entry for all the delete action types + verify(auditTrail).explicitIndexAccessEvent( + eq(indexRequestId), + eq(AuditLevel.ACCESS_DENIED), + eq(indexAuthentication), + eq(DeleteAction.NAME), + eq(new String[] { indexName }), + eq(BulkItemRequest.class.getSimpleName()), + eq(request.remoteAddress()), + authzInfoRoles(new String[] { indexRole.getName() }) + ); + } + verifyNoMoreInteractions(auditTrail); + for (BulkItemRequest bulkItemRequest : request.items()) { + if (deleteItems.contains(bulkItemRequest.id())) { + assertThat(bulkItemRequest.getPrimaryResponse().isFailed(), is(true)); + } else { + assertThat(bulkItemRequest.getPrimaryResponse(), nullValue()); + } + } + } + + public void testAuthorizationOfIndividualIndexAndDeleteBulkItems() { final String action = BulkAction.NAME + "[s]"; final BulkItemRequest[] items = { new BulkItemRequest(1, new DeleteRequest("concrete-index", "c1")), @@ -2414,7 +2806,7 @@ public void testAuthorizationOfIndividualBulkItems() { new BulkItemRequest(5, new DeleteRequest("alias-2", "a2a")), new BulkItemRequest(6, new IndexRequest("alias-2").id("a2b")) }; final ShardId shardId = new ShardId("concrete-index", UUID.randomUUID().toString(), 1); - final TransportRequest request = new BulkShardRequest(shardId, WriteRequest.RefreshPolicy.IMMEDIATE, items); + final BulkShardRequest request = new BulkShardRequest(shardId, WriteRequest.RefreshPolicy.IMMEDIATE, items); final Authentication authentication = createAuthentication(new User("user", "my-role")); RoleDescriptor role = new RoleDescriptor( @@ -2437,27 +2829,10 @@ public void testAuthorizationOfIndividualBulkItems() { eq(AuditLevel.ACCESS_GRANTED), 
eq(authentication), eq(DeleteAction.NAME), - eq("concrete-index"), - eq(BulkItemRequest.class.getSimpleName()), - eq(request.remoteAddress()), - authzInfoRoles(new String[] { role.getName() }) - ); - verify(auditTrail).explicitIndexAccessEvent( - eq(requestId), - eq(AuditLevel.ACCESS_GRANTED), - eq(authentication), - eq(DeleteAction.NAME), - eq("alias-2"), - eq(BulkItemRequest.class.getSimpleName()), - eq(request.remoteAddress()), - authzInfoRoles(new String[] { role.getName() }) - ); - verify(auditTrail).explicitIndexAccessEvent( - eq(requestId), - eq(AuditLevel.ACCESS_GRANTED), - eq(authentication), - eq(IndexAction.NAME + ":op_type/index"), - eq("concrete-index"), + argThat(indicesArrays -> { + Arrays.sort(indicesArrays); + return Arrays.equals(indicesArrays, new String[] { "alias-2", "concrete-index" }); + }), eq(BulkItemRequest.class.getSimpleName()), eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() }) @@ -2467,7 +2842,10 @@ public void testAuthorizationOfIndividualBulkItems() { eq(AuditLevel.ACCESS_GRANTED), eq(authentication), eq(IndexAction.NAME + ":op_type/index"), - eq("alias-1"), + argThat(indicesArrays -> { + Arrays.sort(indicesArrays); + return Arrays.equals(indicesArrays, new String[] { "alias-1", "concrete-index" }); + }), eq(BulkItemRequest.class.getSimpleName()), eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() }) @@ -2477,7 +2855,7 @@ public void testAuthorizationOfIndividualBulkItems() { eq(AuditLevel.ACCESS_DENIED), eq(authentication), eq(DeleteAction.NAME), - eq("alias-1"), + eq(new String[] { "alias-1" }), eq(BulkItemRequest.class.getSimpleName()), eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() }) @@ -2487,7 +2865,7 @@ public void testAuthorizationOfIndividualBulkItems() { eq(AuditLevel.ACCESS_DENIED), eq(authentication), eq(IndexAction.NAME + ":op_type/index"), - eq("alias-2"), + eq(new String[] { "alias-2" }), eq(BulkItemRequest.class.getSimpleName()), eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() }) @@ -2500,6 +2878,12 @@ public void testAuthorizationOfIndividualBulkItems() { authzInfoRoles(new String[] { role.getName() }) ); // bulk request is allowed verifyNoMoreInteractions(auditTrail); + assertThat(request.items()[0].getPrimaryResponse(), nullValue()); + assertThat(request.items()[1].getPrimaryResponse(), nullValue()); + assertThat(request.items()[2].getPrimaryResponse().isFailed(), is(true)); + assertThat(request.items()[3].getPrimaryResponse(), nullValue()); + assertThat(request.items()[4].getPrimaryResponse(), nullValue()); + assertThat(request.items()[5].getPrimaryResponse().isFailed(), is(true)); } public void testAuthorizationOfIndividualBulkItemsWithDateMath() { @@ -2511,7 +2895,7 @@ public void testAuthorizationOfIndividualBulkItemsWithDateMath() { new BulkItemRequest(4, new DeleteRequest("", "dm2")), // resolves to same as above }; final ShardId shardId = new ShardId("concrete-index", UUID.randomUUID().toString(), 1); - final TransportRequest request = new BulkShardRequest(shardId, WriteRequest.RefreshPolicy.IMMEDIATE, items); + final BulkShardRequest request = new BulkShardRequest(shardId, WriteRequest.RefreshPolicy.IMMEDIATE, items); final Authentication authentication = createAuthentication(new User("user", "my-role")); final RoleDescriptor role = new RoleDescriptor( @@ -2527,22 +2911,23 @@ public void testAuthorizationOfIndividualBulkItemsWithDateMath() { authorize(authentication, action, request); // both deletes should fail - 
verify(auditTrail, times(2)).explicitIndexAccessEvent( + verify(auditTrail).explicitIndexAccessEvent( eq(requestId), eq(AuditLevel.ACCESS_DENIED), eq(authentication), eq(DeleteAction.NAME), - ArgumentMatchers.startsWith("datemath-"), + argThat(indices -> indices.length == 2 && indices[0].startsWith("datemath-") && indices[1].startsWith("datemath-")), eq(BulkItemRequest.class.getSimpleName()), eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() }) ); - verify(auditTrail, times(2)).explicitIndexAccessEvent( + // both indexing should go through + verify(auditTrail).explicitIndexAccessEvent( eq(requestId), eq(AuditLevel.ACCESS_GRANTED), eq(authentication), eq(IndexAction.NAME + ":op_type/index"), - ArgumentMatchers.startsWith("datemath-"), + argThat(indices -> indices.length == 2 && indices[0].startsWith("datemath-") && indices[1].startsWith("datemath-")), eq(BulkItemRequest.class.getSimpleName()), eq(request.remoteAddress()), authzInfoRoles(new String[] { role.getName() }) @@ -2556,6 +2941,10 @@ public void testAuthorizationOfIndividualBulkItemsWithDateMath() { authzInfoRoles(new String[] { role.getName() }) ); verifyNoMoreInteractions(auditTrail); + assertThat(request.items()[0].getPrimaryResponse(), nullValue()); + assertThat(request.items()[1].getPrimaryResponse().isFailed(), is(true)); + assertThat(request.items()[2].getPrimaryResponse(), nullValue()); + assertThat(request.items()[3].getPrimaryResponse().isFailed(), is(true)); } private BulkShardRequest createBulkShardRequest(String indexName, BiFunction> req) { diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java index dd716b79e1db3..f30fb242abc13 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/CompositeRolesStoreTests.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsAction; import org.elasticsearch.action.admin.cluster.settings.ClusterUpdateSettingsAction; @@ -1884,7 +1885,7 @@ public void testApiKeyAuthUsesApiKeyService() throws Exception { AuditUtil.getOrGenerateRequestId(threadContext); final TransportVersion version = randomFrom( TransportVersion.current(), - TransportVersionUtils.randomVersionBetween(random(), TransportVersion.V_7_0_0, TransportVersion.V_7_8_1) + TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_7_0_0, TransportVersions.V_7_8_1) ); final Authentication authentication = createApiKeyAuthentication( apiKeyService, @@ -1967,7 +1968,7 @@ public void testApiKeyAuthUsesApiKeyServiceWithScopedRole() throws Exception { AuditUtil.getOrGenerateRequestId(threadContext); final TransportVersion version = randomFrom( TransportVersion.current(), - TransportVersionUtils.randomVersionBetween(random(), TransportVersion.V_7_0_0, TransportVersion.V_7_8_1) + TransportVersionUtils.randomVersionBetween(random(), TransportVersions.V_7_0_0, TransportVersions.V_7_8_1) ); final Authentication authentication = createApiKeyAuthentication( apiKeyService, diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java index 4a35cc675850d..49d5a67b7d20e 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/authz/store/NativeRolesStoreTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; @@ -389,7 +390,7 @@ public void testPutRoleWithRemoteIndicesUnsupportedMinNodeVersion() { ); final TransportVersion minTransportVersion = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.MINIMUM_COMPATIBLE, + TransportVersions.MINIMUM_COMPATIBLE, transportVersionBeforeAdvancedRemoteClusterSecurity ); final ClusterService clusterService = mockClusterServiceWithMinNodeVersion(minTransportVersion); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/FileOperatorUsersStoreTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/FileOperatorUsersStoreTests.java index df7e25040c035..1467142072b31 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/FileOperatorUsersStoreTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/operator/FileOperatorUsersStoreTests.java @@ -155,6 +155,17 @@ public void testIsOperator() throws IOException { .build(false) ) ); + final Authentication.RealmRef jwtRealm = new Authentication.RealmRef("jwt1", "jwt", randomAlphaOfLength(8)); + assertTrue( + fileOperatorUsersStore.isOperatorUser( + AuthenticationTestHelper.builder().realm().user(new User("me@elastic.co", randomRoles())).realmRef(jwtRealm).build(false) + ) + ); + assertFalse( + fileOperatorUsersStore.isOperatorUser( + AuthenticationTestHelper.builder().realm().user(new User("you@elastic.co", randomRoles())).realmRef(jwtRealm).build(false) + ) + ); } @@ -175,14 +186,14 @@ public void testFileAutoReload() throws Exception { "1st file parsing", logger.getName(), Level.INFO, - "parsed [3] group(s) with a total of [4] operator user(s) from file [" + inUseFile.toAbsolutePath() + "]" + "parsed [4] group(s) with a total of [5] operator user(s) from file [" + inUseFile.toAbsolutePath() + "]" ) ); final FileOperatorUsersStore fileOperatorUsersStore = new FileOperatorUsersStore(env, watcherService); final List groups = fileOperatorUsersStore.getOperatorUsersDescriptor().getGroups(); - assertEquals(3, groups.size()); + assertEquals(4, groups.size()); assertEquals(new FileOperatorUsersStore.Group(Set.of("operator_1", "operator_2"), "file"), groups.get(0)); assertEquals(new FileOperatorUsersStore.Group(Set.of("operator_3"), null), groups.get(1)); assertEquals( @@ -196,6 +207,7 @@ public void testFileAutoReload() throws Exception { ), groups.get(2) ); + assertEquals(new FileOperatorUsersStore.Group(Set.of("me@elastic.co"), "jwt1", "jwt", "realm", null, null), groups.get(3)); appender.assertAllExpectationsMatched(); // Content does not change, the groups should not be updated @@ -220,8 +232,8 @@ public void 
testFileAutoReload() throws Exception { } assertBusy(() -> { final List newGroups = fileOperatorUsersStore.getOperatorUsersDescriptor().getGroups(); - assertEquals(4, newGroups.size()); - assertEquals(new FileOperatorUsersStore.Group(Set.of("operator_4")), newGroups.get(3)); + assertEquals(5, newGroups.size()); + assertEquals(new FileOperatorUsersStore.Group(Set.of("operator_4")), newGroups.get(4)); }); appender.assertAllExpectationsMatched(); @@ -233,14 +245,14 @@ public void testFileAutoReload() throws Exception { Level.ERROR, "Failed to parse operator users file", XContentParseException.class, - "[15:1] [operator_privileges.operator] failed to parse field [operator]" + "[19:1] [operator_privileges.operator] failed to parse field [operator]" ) ); try (BufferedWriter writer = Files.newBufferedWriter(inUseFile, StandardCharsets.UTF_8, StandardOpenOption.APPEND)) { writer.append(" - blah\n"); } watcherService.notifyNow(ResourceWatcherService.Frequency.HIGH); - assertEquals(4, fileOperatorUsersStore.getOperatorUsersDescriptor().getGroups().size()); + assertEquals(5, fileOperatorUsersStore.getOperatorUsersDescriptor().getGroups().size()); appender.assertAllExpectationsMatched(); // Delete the file will remove all the operator users @@ -259,7 +271,7 @@ public void testFileAutoReload() throws Exception { // Back to original content Files.copy(sampleFile, inUseFile, StandardCopyOption.REPLACE_EXISTING); - assertBusy(() -> assertEquals(3, fileOperatorUsersStore.getOperatorUsersDescriptor().getGroups().size())); + assertBusy(() -> assertEquals(4, fileOperatorUsersStore.getOperatorUsersDescriptor().getGroups().size())); } finally { Loggers.removeAppender(logger, appender); appender.stop(); @@ -340,11 +352,14 @@ public void testParseConfig() throws IOException { auth_type: "token" token_source: "index" token_names: ["token1", "token2"] + - usernames: ["myprinc", "myprinc2"] + realm_type: "jwt" + realm_name: "jwt1" """; try (ByteArrayInputStream in = new ByteArrayInputStream(config.getBytes(StandardCharsets.UTF_8))) { final List groups = FileOperatorUsersStore.parseConfig(in).getGroups(); - assertEquals(2, groups.size()); + assertEquals(3, groups.size()); assertEquals(new FileOperatorUsersStore.Group(Set.of("internal_system"), "file1"), groups.get(0)); assertEquals( new FileOperatorUsersStore.Group( @@ -357,6 +372,10 @@ public void testParseConfig() throws IOException { ), groups.get(1) ); + assertEquals( + new FileOperatorUsersStore.Group(Set.of("myprinc", "myprinc2"), "jwt1", "jwt", "realm", null, null), + groups.get(2) + ); } } @@ -370,7 +389,7 @@ public void testParseInvalidConfig() throws IOException { final XContentParseException e = expectThrows(XContentParseException.class, () -> FileOperatorUsersStore.parseConfig(in)); assertThat( e.getCause().getCause().getMessage(), - containsString("[realm_type] requires [file] when [auth_type] is [realm] or not specified") + containsString("when [auth_type] is defined as [realm] then [realm_type] must be defined as [file] or [jwt]") ); } @@ -425,10 +444,6 @@ public void testParseInvalidConfig() throws IOException { e.getCause().getCause().getMessage(), containsString("[realm_name] must be specified for realm types other than [_service_account,file,native,reserved]") ); - assertThat( - e.getCause().getCause().getMessage(), - containsString("[realm_type] requires [file] when [auth_type] is [realm] or not specified") - ); } config = """ diff --git 
a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java index 2aa41f87757d5..9bd5d416940d3 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/SecurityServerTransportInterceptorTests.java @@ -8,7 +8,7 @@ import org.elasticsearch.ElasticsearchSecurityException; import org.elasticsearch.TransportVersion; -import org.elasticsearch.Version; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.admin.cluster.state.ClusterStateAction; import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest; @@ -407,7 +407,7 @@ public void sendRequest( } }; AsyncSender sender = interceptor.interceptSender(intercepted); - final TransportVersion connectionVersion = TransportVersion.fromId(Version.CURRENT.id - randomIntBetween(100, 100000)); + final TransportVersion connectionVersion = TransportVersion.fromId(TransportVersion.current().id() - randomIntBetween(100, 100000)); assertEquals(connectionVersion, TransportVersion.min(connectionVersion, TransportVersion.current())); Transport.Connection connection = mock(Transport.Connection.class); @@ -570,7 +570,7 @@ public void testProfileSecuredRequestHandlerDecrementsRefCountOnFailure() throws logger, DeleteIndexAction.NAME, randomBoolean(), - randomBoolean() ? ThreadPool.Names.SAME : ThreadPool.Names.GENERIC, + threadPool.executor(randomBoolean() ? ThreadPool.Names.SAME : ThreadPool.Names.GENERIC), (request, channel, task) -> fail("should fail at destructive operations check to trigger listener failure"), Map.of( profileName, @@ -986,7 +986,7 @@ public void sendRequest( ); final TransportVersion version = TransportVersionUtils.randomVersionBetween( random(), - TransportVersion.V_7_17_0, + TransportVersions.V_7_17_0, versionBeforeCrossClusterAccessRealm ); when(connection.getTransportVersion()).thenReturn(version); diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java index 12198b67af88a..df6ba3abda55d 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4HeaderSizeLimitTests.java @@ -18,6 +18,7 @@ import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.MockPageCacheRecycler; import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; import org.elasticsearch.mocksocket.MockSocket; import org.elasticsearch.tasks.TaskManager; @@ -33,6 +34,7 @@ import org.elasticsearch.transport.netty4.SharedGroupFactory; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.CrossClusterAccessAuthenticationService; import org.junit.After; import org.junit.Before; @@ -89,7 +91,8 @@ public void 
startThreadPool() { new NoneCircuitBreakerService(), null, mock(SSLService.class), - new SharedGroupFactory(settings) + new SharedGroupFactory(settings), + mock(CrossClusterAccessAuthenticationService.class) ); requestIdReceived = new AtomicLong(-1L); securityNettyTransport.setMessageListener(new TransportMessageListener() { @@ -104,7 +107,7 @@ public void onRequestReceived(long requestId, String action) { TestRequest::new, taskManager, (request, channel, task) -> channel.sendResponse(TransportResponse.Empty.INSTANCE), - ThreadPool.Names.SAME, + EsExecutors.DIRECT_EXECUTOR_SERVICE, false, true, Tracer.NOOP diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4ServerTransportAuthenticationTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4ServerTransportAuthenticationTests.java new file mode 100644 index 0000000000000..600d4d1ba5fa4 --- /dev/null +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SecurityNetty4ServerTransportAuthenticationTests.java @@ -0,0 +1,398 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.security.transport.netty4; + +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.remote.RemoteClusterNodesAction; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodeRole; +import org.elasticsearch.cluster.node.VersionInformation; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.RecyclerBytesStreamOutput; +import org.elasticsearch.common.network.NetworkService; +import org.elasticsearch.common.recycler.Recycler; +import org.elasticsearch.common.settings.MockSecureSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.transport.TransportAddress; +import org.elasticsearch.common.util.PageCacheRecycler; +import org.elasticsearch.core.IOUtils; +import org.elasticsearch.indices.breaker.NoneCircuitBreakerService; +import org.elasticsearch.mocksocket.MockSocket; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.NodeRoles; +import org.elasticsearch.test.transport.MockTransportService; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.BytesRefRecycler; +import org.elasticsearch.transport.Compression; +import org.elasticsearch.transport.ProxyConnectionStrategy; +import org.elasticsearch.transport.RemoteClusterPortSettings; +import org.elasticsearch.transport.RemoteClusterService; +import org.elasticsearch.transport.RemoteConnectionStrategy; +import org.elasticsearch.transport.RemoteTransportException; +import org.elasticsearch.transport.SniffConnectionStrategy; +import org.elasticsearch.transport.TestOutboundRequestMessage; +import org.elasticsearch.transport.TransportInterceptor; 
+import org.elasticsearch.transport.TransportRequest; +import org.elasticsearch.transport.TransportRequestHandler; +import org.elasticsearch.transport.netty4.SharedGroupFactory; +import org.elasticsearch.xpack.core.XPackSettings; +import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.CrossClusterAccessAuthenticationService; +import org.junit.After; +import org.junit.Before; + +import java.net.InetSocketAddress; +import java.net.Socket; +import java.net.SocketTimeoutException; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Executor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.elasticsearch.test.ActionListenerUtils.anyActionListener; +import static org.elasticsearch.test.NodeRoles.onlyRole; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.mock; + +public class SecurityNetty4ServerTransportAuthenticationTests extends ESTestCase { + + private ThreadPool threadPool; + // when non-null, authn fails with this exception; when null, authn passes + private AtomicReference<Exception> authenticationException; + private String remoteClusterName; + private SecurityNetty4ServerTransport remoteSecurityNetty4ServerTransport; + private MockTransportService remoteTransportService; + + @SuppressWarnings("unchecked") + @Override + @Before + public void setUp() throws Exception { + super.setUp(); + threadPool = new TestThreadPool(getClass().getName()); + authenticationException = new AtomicReference<>(); + remoteClusterName = "test-remote_cluster_service_" + randomAlphaOfLength(8); + Settings remoteSettings = Settings.builder() + .put("node.name", getClass().getName()) + .put(ClusterName.CLUSTER_NAME_SETTING.getKey(), remoteClusterName) + .put(XPackSettings.TRANSPORT_SSL_ENABLED.getKey(), "false") + .put(XPackSettings.REMOTE_CLUSTER_SERVER_SSL_ENABLED.getKey(), "false") + .put(XPackSettings.REMOTE_CLUSTER_CLIENT_SSL_ENABLED.getKey(), "false") + .put(RemoteClusterPortSettings.REMOTE_CLUSTER_SERVER_ENABLED.getKey(), "true") + .put(RemoteClusterPortSettings.PORT.getKey(), 0) + .put("transport.ignore_deserialization_errors", true) + .build(); + remoteSettings = NodeRoles.nonRemoteClusterClientNode(remoteSettings); + CrossClusterAccessAuthenticationService remoteCrossClusterAccessAuthenticationService = mock( + CrossClusterAccessAuthenticationService.class + ); + doAnswer(invocation -> { + Exception authnException = authenticationException.get(); + if (authnException != null) { + ((ActionListener) invocation.getArguments()[1]).onFailure(authnException); + } else { + ((ActionListener) invocation.getArguments()[1]).onResponse(null); + } + return null; + }).when(remoteCrossClusterAccessAuthenticationService).tryAuthenticate(any(Map.class), anyActionListener()); + remoteSecurityNetty4ServerTransport = new SecurityNetty4ServerTransport( + remoteSettings, + TransportVersion.current(), + threadPool, + new NetworkService(List.of()), + PageCacheRecycler.NON_RECYCLING_INSTANCE, + new NamedWriteableRegistry(List.of()), + new NoneCircuitBreakerService(), + null, + mock(SSLService.class), + new SharedGroupFactory(remoteSettings),
remoteCrossClusterAccessAuthenticationService + ); + remoteTransportService = MockTransportService.createNewService( + remoteSettings, + remoteSecurityNetty4ServerTransport, + VersionInformation.CURRENT, + threadPool, + null, + Collections.emptySet(), + // IMPORTANT: we have to mock authentication in two places: one in the "CrossClusterAccessAuthenticationService" and the + // other before the action handler here. This is in order to accurately simulate the complete Elasticsearch node behavior. + new TransportInterceptor() { + @Override + public TransportRequestHandler interceptHandler( + String action, + Executor executor, + boolean forceExecution, + TransportRequestHandler actualHandler + ) { + return (request, channel, task) -> { + Exception authnException = authenticationException.get(); + if (authnException != null) { + channel.sendResponse(authnException); + } else { + actualHandler.messageReceived(request, channel, task); + } + }; + } + } + ); + DiscoveryNode remoteNode = remoteTransportService.getLocalDiscoNode(); + remoteTransportService.registerRequestHandler( + RemoteClusterNodesAction.NAME, + ThreadPool.Names.SAME, + RemoteClusterNodesAction.Request::new, + (request, channel, task) -> channel.sendResponse(new RemoteClusterNodesAction.Response(List.of(remoteNode))) + ); + remoteTransportService.start(); + remoteTransportService.acceptIncomingRequests(); + } + + @Override + @After + public void tearDown() throws Exception { + logger.info("tearDown"); + super.tearDown(); + IOUtils.close( + remoteTransportService, + remoteSecurityNetty4ServerTransport, + () -> ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS) + ); + } + + public void testProxyStrategyConnectionClosesWhenAuthenticatorAlwaysFails() throws Exception { + // all requests fail authn + authenticationException.set(new ElasticsearchSecurityException("authn failure")); + try ( + MockTransportService localService = MockTransportService.createNewService( + proxyLocalTransportSettings(), + VersionInformation.CURRENT, + TransportVersion.current(), + threadPool + ) + ) { + localService.start(); + // all attempts to obtain a connection will fail + for (int i = 0; i < randomIntBetween(2, 4); i++) { + CountDownLatch connectionTestDone = new CountDownLatch(1); + // {@code RemoteClusterService.REMOTE_CLUSTER_HANDSHAKE_ACTION_NAME} fails authn (both of them) and the connection is + // always closed after receiving an error response + localService.getRemoteClusterService() + .maybeEnsureConnectedAndGetConnection(remoteClusterName, true, ActionListener.wrap(connection -> { + logger.info("Unexpected: a connection is available"); + connectionTestDone.countDown(); + fail("No connection should be available if authn fails"); + }, e -> { + logger.info("Expected: no connection could be established"); + connectionTestDone.countDown(); + assertThat(e, instanceOf(RemoteTransportException.class)); + assertThat(e.getCause(), instanceOf(authenticationException.get().getClass())); + })); + assertTrue(connectionTestDone.await(10L, TimeUnit.SECONDS)); + } + } + // but if authn passes, valid connections are available + authenticationException.set(null); + try ( + MockTransportService localService = MockTransportService.createNewService( + proxyLocalTransportSettings(), + VersionInformation.CURRENT, + TransportVersion.current(), + threadPool + ) + ) { + localService.start(); + CountDownLatch connectionTestDone = new CountDownLatch(1); + localService.getRemoteClusterService() + .maybeEnsureConnectedAndGetConnection(remoteClusterName, true,
ActionListener.wrap(connection -> { + logger.info("Expected: a connection is available"); + connectionTestDone.countDown(); + }, e -> { + logger.info("Unexpected: no connection could be established"); + connectionTestDone.countDown(); + fail("connection could not be established"); + throw new RuntimeException(e); + })); + assertTrue(connectionTestDone.await(10L, TimeUnit.SECONDS)); + } + } + + public void testSniffStrategyNoConnectionWhenAuthenticatorAlwaysFails() throws Exception { + // all requests fail authn + authenticationException.set(new ElasticsearchSecurityException("authn failure")); + try ( + MockTransportService localService = MockTransportService.createNewService( + sniffLocalTransportSettings(), + VersionInformation.CURRENT, + TransportVersion.current(), + threadPool + ) + ) { + localService.start(); + // obtain some connections and check that they'll be promptly closed + for (int i = 0; i < randomIntBetween(2, 4); i++) { + CountDownLatch connectionTestDone = new CountDownLatch(1); + // the failed authentication during handshake must surely close the connection before + // {@code RemoteClusterNodesAction.NAME} is executed, so node sniffing will fail + localService.getRemoteClusterService() + .maybeEnsureConnectedAndGetConnection(remoteClusterName, true, ActionListener.wrap(connection -> { + logger.info("Unexpected: a connection is available"); + connectionTestDone.countDown(); + fail("No connection should be available if authn fails"); + }, e -> { + logger.info("Expected: no connection could be established"); + connectionTestDone.countDown(); + assertThat(e, instanceOf(RemoteTransportException.class)); + assertThat(e.getCause(), instanceOf(authenticationException.get().getClass())); + })); + assertTrue(connectionTestDone.await(10L, TimeUnit.SECONDS)); + } + } + // but if authn passes, valid connections are available + authenticationException.set(null); + try ( + MockTransportService localService = MockTransportService.createNewService( + sniffLocalTransportSettings(), + VersionInformation.CURRENT, + TransportVersion.current(), + threadPool + ) + ) { + localService.start(); + CountDownLatch connectionTestDone = new CountDownLatch(1); + localService.getRemoteClusterService() + .maybeEnsureConnectedAndGetConnection(remoteClusterName, true, ActionListener.wrap(connection -> { + logger.info("Expected: a connection is available"); + connectionTestDone.countDown(); + }, e -> { + logger.info("Unexpected: no connection could be established"); + connectionTestDone.countDown(); + fail("connection could not be established"); + throw new RuntimeException(e); + })); + assertTrue(connectionTestDone.await(10L, TimeUnit.SECONDS)); + } + } + + public void testConnectionWorksForPing() throws Exception { + authenticationException.set(new ElasticsearchSecurityException("authn failure")); + TransportAddress[] boundRemoteIngressAddresses = remoteSecurityNetty4ServerTransport.boundRemoteIngressAddress().boundAddresses(); + InetSocketAddress remoteIngressTransportAddress = randomFrom(boundRemoteIngressAddresses).address(); + // ping message + final BytesStreamOutput bytesStreamOutput = new BytesStreamOutput(); + bytesStreamOutput.writeBytes(new byte[] { (byte) 'E', (byte) 'S' }); + bytesStreamOutput.writeInt(-1); + try (Socket socket = new MockSocket(remoteIngressTransportAddress.getAddress(), remoteIngressTransportAddress.getPort())) { + final byte[] pingBytes = Arrays.copyOfRange(bytesStreamOutput.bytes().array(), 0, 6); + socket.getOutputStream().write(pingBytes); + 
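// The six bytes just written form a transport ping frame: the 'E','S' marker followed by a
// 4-byte length of -1. As the assertions below show, the server echoes the ping back even
// though every authn attempt in this test is set up to fail.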
socket.getOutputStream().flush(); + // We should receive the ping back + final byte[] responseBytes = socket.getInputStream().readNBytes(6); + assertThat(responseBytes, equalTo(pingBytes)); + try { + socket.setSoTimeout(1000); + socket.getInputStream().read(); + fail("should not reach here"); + } catch (SocketTimeoutException e) { + // timeout exception means the server is still connected. Just no data is coming which is normal + } + } + } + + public void testConnectionDisconnectedWhenAuthnFails() throws Exception { + authenticationException.set(new ElasticsearchSecurityException("authn failure")); + TransportAddress[] boundRemoteIngressAddresses = remoteSecurityNetty4ServerTransport.boundRemoteIngressAddress().boundAddresses(); + InetSocketAddress remoteIngressTransportAddress = randomFrom(boundRemoteIngressAddresses).address(); + try (Socket socket = new MockSocket(remoteIngressTransportAddress.getAddress(), remoteIngressTransportAddress.getPort())) { + TestOutboundRequestMessage message = new TestOutboundRequestMessage( + threadPool.getThreadContext(), + TransportRequest.Empty.INSTANCE, + TransportVersion.current(), + "internal:whatever", + randomNonNegativeLong(), + false, + randomFrom(Compression.Scheme.DEFLATE, Compression.Scheme.LZ4, null) + ); + Recycler recycler = new BytesRefRecycler(PageCacheRecycler.NON_RECYCLING_INSTANCE); + RecyclerBytesStreamOutput out = new RecyclerBytesStreamOutput(recycler); + BytesReference bytesReference = message.serialize(out); + socket.getOutputStream().write(Arrays.copyOfRange(bytesReference.array(), 0, bytesReference.length())); + socket.getOutputStream().flush(); + + final String response = new String(socket.getInputStream().readAllBytes(), StandardCharsets.UTF_8); + assertThat(response, containsString("authn failure")); + // -1 means the other side has disconnected + assertThat(socket.getInputStream().read(), equalTo(-1)); + } + } + + private Settings sniffLocalTransportSettings() { + Settings localSettings = Settings.builder() + .put(onlyRole(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)) + .put(RemoteConnectionStrategy.REMOTE_CONNECTION_MODE.getConcreteSettingForNamespace(remoteClusterName).getKey(), "sniff") + .put( + SniffConnectionStrategy.REMOTE_CLUSTER_SEEDS.getConcreteSettingForNamespace(remoteClusterName).getKey(), + remoteTransportService.boundRemoteAccessAddress().publishAddress().toString() + ) + .put( + SniffConnectionStrategy.REMOTE_CONNECTIONS_PER_CLUSTER.getKey(), + randomIntBetween(1, 3) // easier to debug with just 1 connection + ) + .put( + SniffConnectionStrategy.REMOTE_NODE_CONNECTIONS.getConcreteSettingForNamespace(remoteClusterName).getKey(), + randomIntBetween(1, 3) // easier to debug with just 1 connection + ) + .build(); + { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString( + RemoteClusterService.REMOTE_CLUSTER_CREDENTIALS.getConcreteSettingForNamespace(remoteClusterName).getKey(), + randomAlphaOfLength(20) + ); + return Settings.builder().put(localSettings).setSecureSettings(secureSettings).build(); + } + } + + private Settings proxyLocalTransportSettings() { + Settings localSettings = Settings.builder() + .put(onlyRole(DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE)) + .put(RemoteConnectionStrategy.REMOTE_CONNECTION_MODE.getConcreteSettingForNamespace(remoteClusterName).getKey(), "proxy") + .put( + ProxyConnectionStrategy.PROXY_ADDRESS.getConcreteSettingForNamespace(remoteClusterName).getKey(), + remoteTransportService.boundRemoteAccessAddress().publishAddress().toString() 
+ ) + .put( + ProxyConnectionStrategy.REMOTE_SOCKET_CONNECTIONS.getConcreteSettingForNamespace(remoteClusterName).getKey(), + randomIntBetween(1, 3) // easier to debug with just 1 connection + ) + .build(); + { + final MockSecureSettings secureSettings = new MockSecureSettings(); + secureSettings.setString( + RemoteClusterService.REMOTE_CLUSTER_CREDENTIALS.getConcreteSettingForNamespace(remoteClusterName).getKey(), + randomAlphaOfLength(20) + ); + return Settings.builder().put(localSettings).setSecureSettings(secureSettings).build(); + } + } + +} diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java index 18cf0418e0942..ce3260a7875a1 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/transport/netty4/SimpleSecurityNetty4ServerTransportTests.java @@ -14,6 +14,7 @@ import org.apache.lucene.util.Constants; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.cluster.node.DiscoveryNode; @@ -58,6 +59,7 @@ import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.common.socket.SocketAccess; import org.elasticsearch.xpack.core.ssl.SSLService; +import org.elasticsearch.xpack.security.authc.CrossClusterAccessAuthenticationService; import org.elasticsearch.xpack.security.transport.SSLEngineUtils; import org.elasticsearch.xpack.security.transport.filter.IPFilter; @@ -107,6 +109,7 @@ import static org.hamcrest.Matchers.notNullValue; import static org.hamcrest.Matchers.nullValue; import static org.hamcrest.Matchers.startsWith; +import static org.mockito.Mockito.mock; public class SimpleSecurityNetty4ServerTransportTests extends AbstractSimpleTransportTestCase { @Override @@ -1050,7 +1053,8 @@ static class TestSecurityNetty4ServerTransport extends SecurityNetty4ServerTrans circuitBreakerService, authenticator, sslService, - sharedGroupFactory + sharedGroupFactory, + mock(CrossClusterAccessAuthenticationService.class) ); this.doHandshake = doHandshake; } @@ -1066,7 +1070,7 @@ public void executeHandshake( super.executeHandshake(node, channel, profile, listener); } else { assert getVersion().equals(TransportVersion.current()); - listener.onResponse(TransportVersion.MINIMUM_COMPATIBLE); + listener.onResponse(TransportVersions.MINIMUM_COMPATIBLE); } } diff --git a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/operator/operator_users.yml b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/operator/operator_users.yml index 6547a453b976d..b7562b9facf75 100644 --- a/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/operator/operator_users.yml +++ b/x-pack/plugin/security/src/test/resources/org/elasticsearch/xpack/security/operator/operator_users.yml @@ -9,3 +9,7 @@ operator: auth_type: "token" token_source: "file" token_names: [ "kibana-token" ] + - usernames: [ "me@elastic.co" ] + realm_type: "jwt" + realm_name: "jwt1" +
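The yaml addition above pairs with the six-argument Group assertions in FileOperatorUsersStoreTests earlier in this diff. The positional mapping below is inferred from those assertions; treat it as a sketch, not canonical documentation of the constructor:

// Inferred mapping of the Group constructor arguments onto operator_users.yml keys (sketch only):
new FileOperatorUsersStore.Group(
    Set.of("me@elastic.co"), // usernames
    "jwt1",                  // realm_name
    "jwt",                   // realm_type: "jwt" is now accepted alongside "file"
    "realm",                 // auth_type (also the default when unspecified)
    null,                    // token_source: only meaningful when auth_type is "token"
    null                     // token_names: only meaningful when auth_type is "token"
);

diff --git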
a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/GetShutdownStatusAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/GetShutdownStatusAction.java index 96676ea0f5598..b087577b797af 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/GetShutdownStatusAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/GetShutdownStatusAction.java @@ -79,7 +79,7 @@ public Response(List shutdownStatuses) { } public Response(StreamInput in) throws IOException { - this.shutdownStatuses = in.readList(SingleNodeShutdownStatus::new); + this.shutdownStatuses = in.readCollectionAsList(SingleNodeShutdownStatus::new); } public List getShutdownStatuses() { @@ -102,7 +102,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws @Override public void writeTo(StreamOutput out) throws IOException { - out.writeList(shutdownStatuses); + out.writeCollection(shutdownStatuses); } @Override diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SlmHealthIndicatorService.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SlmHealthIndicatorService.java index 7ee70f45053fe..442557f17a562 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SlmHealthIndicatorService.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SlmHealthIndicatorService.java @@ -134,7 +134,7 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources .values() .stream() .filter(metadata -> snapshotFailuresExceedWarningCount(failedSnapshotWarnThreshold, metadata)) - .sorted(Comparator.comparing(SnapshotLifecyclePolicyMetadata::getName)) + .sorted(Comparator.comparing(SnapshotLifecyclePolicyMetadata::getId)) .toList(); if (unhealthyPolicies.size() > 0) { @@ -152,7 +152,7 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources String unhealthyPolicyCauses = unhealthyPolicies.stream() .map( policy -> "- [" - + policy.getName() + + policy.getId() + "] had [" + policy.getInvocationsSinceLastSuccess() + "] repeated failures without successful execution" @@ -166,7 +166,7 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources : "An automated snapshot policy is unhealthy:\n") + unhealthyPolicyCauses; String unhealthyPolicyActions = unhealthyPolicies.stream() - .map(policy -> "- GET /_slm/policy/" + policy.getPolicy().getId() + "?human") + .map(policy -> "- GET /_slm/policy/" + policy.getId() + "?human") .collect(Collectors.joining("\n")); String action = "Check the snapshot lifecycle " + (unhealthyPolicies.size() > 1 ? 
"policies" : "policy") @@ -185,7 +185,7 @@ public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResources new Diagnosis.Resource( Diagnosis.Resource.Type.SLM_POLICY, unhealthyPolicies.stream() - .map(SnapshotLifecyclePolicyMetadata::getName) + .map(SnapshotLifecyclePolicyMetadata::getId) .limit(Math.min(unhealthyPolicies.size(), maxAffectedResourcesCount)) .toList() ) @@ -242,7 +242,7 @@ private static HealthIndicatorDetails createDetails( unhealthyPolicies.stream() .collect( Collectors.toMap( - SnapshotLifecyclePolicyMetadata::getName, + SnapshotLifecyclePolicyMetadata::getId, SnapshotLifecyclePolicyMetadata::getInvocationsSinceLastSuccess ) ) diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SlmHealthIndicatorServiceTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SlmHealthIndicatorServiceTests.java index bc3aa7258a5f8..2d5a6a800ffa2 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SlmHealthIndicatorServiceTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SlmHealthIndicatorServiceTests.java @@ -181,7 +181,7 @@ public void testIsYellowWhenPoliciesHaveFailedForMoreThanWarningThreshold() { Map.of( "test-policy", SnapshotLifecyclePolicyMetadata.builder() - .setPolicy(new SnapshotLifecyclePolicy("policy-id-1", "test-policy", "", "test-repository", null, null)) + .setPolicy(new SnapshotLifecyclePolicy("test-policy", "", "", "test-repository", null, null)) .setVersion(1L) .setModifiedDate(System.currentTimeMillis()) .setLastSuccess(snapshotInvocation(execTime, execTime + 1000L)) @@ -191,7 +191,14 @@ public void testIsYellowWhenPoliciesHaveFailedForMoreThanWarningThreshold() { "test-policy-without-any-success", SnapshotLifecyclePolicyMetadata.builder() .setPolicy( - new SnapshotLifecyclePolicy("policy-id-2", "test-policy-without-any-success", "", "test-repository", null, null) + new SnapshotLifecyclePolicy( + "test-policy-without-any-success", + "", + "", + "test-repository", + null, + null + ) ) .setVersion(1L) .setModifiedDate(System.currentTimeMillis()) @@ -203,8 +210,8 @@ public void testIsYellowWhenPoliciesHaveFailedForMoreThanWarningThreshold() { SnapshotLifecyclePolicyMetadata.builder() .setPolicy( new SnapshotLifecyclePolicy( - "policy-id-3", "test-policy-without-success-start-time", + "", "", "test-repository", null, @@ -280,9 +287,9 @@ public void testIsYellowWhenPoliciesHaveFailedForMoreThanWarningThreshold() { + failedInvocations3 + "] repeated failures without successful execution", "Check the snapshot lifecycle policies for detailed failure info:\n" - + "- GET /_slm/policy/policy-id-1?human\n" - + "- GET /_slm/policy/policy-id-2?human\n" - + "- GET /_slm/policy/policy-id-3?human" + + "- GET /_slm/policy/test-policy?human\n" + + "- GET /_slm/policy/test-policy-without-any-success?human\n" + + "- GET /_slm/policy/test-policy-without-success-start-time?human" ), List.of( diff --git a/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java b/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java index 5729bca4fe777..8a00c453382fb 100644 --- a/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java +++ 
b/x-pack/plugin/snapshot-based-recoveries/src/test/java/org/elasticsearch/xpack/snapshotbasedrecoveries/recovery/plan/SnapshotsRecoveryPlannerServiceTests.java @@ -71,7 +71,7 @@ public class SnapshotsRecoveryPlannerServiceTests extends ESTestCase { private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings( "index", - Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT).build() + Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, IndexVersion.current()).build() ); private static final ByteSizeValue PART_SIZE = ByteSizeValue.ofBytes(Long.MAX_VALUE); private static final ShardId shardId = new ShardId(INDEX_SETTINGS.getIndex(), 1); diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java index 2d498ff23b8e8..a5ce2b49d9c2a 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisFailureIT.java @@ -420,6 +420,9 @@ public BlobContainer blobContainer(BlobPath path) { } } + @Override + public void deleteBlobsIgnoringIfNotExists(Iterator blobNames) {} + private void deleteContainer(DisruptableBlobContainer container) { blobContainer = null; } diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java index 6c66ecb0674c4..b29940964e942 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalysisSuccessIT.java @@ -240,6 +240,9 @@ private void deleteContainer(AssertingBlobContainer container) { } } + @Override + public void deleteBlobsIgnoringIfNotExists(Iterator blobNames) {} + @Override public void close() {} diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobAnalyzeAction.java index 89a9b7c93113e..e5ebe47d177ff 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/BlobAnalyzeAction.java @@ -9,7 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; import org.elasticsearch.action.ActionRequest; @@ -694,12 +694,12 @@ public static class Request extends ActionRequest { blobName = in.readString(); targetLength = in.readVLong(); seed = 
in.readLong(); - nodes = in.readList(DiscoveryNode::new); + nodes = in.readCollectionAsList(DiscoveryNode::new); readNodeCount = in.readVInt(); earlyReadNodeCount = in.readVInt(); readEarly = in.readBoolean(); writeAndOverwrite = in.readBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_14_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) { abortWrite = in.readBoolean(); } else { abortWrite = false; @@ -714,12 +714,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(blobName); out.writeVLong(targetLength); out.writeLong(seed); - out.writeList(nodes); + out.writeCollection(nodes); out.writeVInt(readNodeCount); out.writeVInt(earlyReadNodeCount); out.writeBoolean(readEarly); out.writeBoolean(writeAndOverwrite); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_14_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) { out.writeBoolean(abortWrite); } else if (abortWrite) { throw new IllegalStateException("cannot send abortWrite request on transport version [" + out.getTransportVersion() + "]"); @@ -841,7 +841,7 @@ public Response(StreamInput in) throws IOException { writeElapsedNanos = in.readVLong(); overwriteElapsedNanos = in.readVLong(); writeThrottledNanos = in.readVLong(); - readDetails = in.readList(ReadDetail::new); + readDetails = in.readCollectionAsList(ReadDetail::new); } @Override @@ -857,7 +857,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeVLong(writeElapsedNanos); out.writeVLong(overwriteElapsedNanos); out.writeVLong(writeThrottledNanos); - out.writeList(readDetails); + out.writeCollection(readDetails); } @Override diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RegisterAnalyzeAction.java b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RegisterAnalyzeAction.java index 81a21836bb325..b12f23c4b9afa 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RegisterAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RegisterAnalyzeAction.java @@ -9,7 +9,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionRequest; import org.elasticsearch.action.ActionRequestValidationException; @@ -192,7 +192,7 @@ public Request(String repositoryName, String containerPath, String registerName, public Request(StreamInput in) throws IOException { super(in); - assert in.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0); + assert in.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0); repositoryName = in.readString(); containerPath = in.readString(); registerName = in.readString(); @@ -202,7 +202,7 @@ public Request(StreamInput in) throws IOException { @Override public void writeTo(StreamOutput out) throws IOException { - assert out.getTransportVersion().onOrAfter(TransportVersion.V_8_8_0); + assert out.getTransportVersion().onOrAfter(TransportVersions.V_8_8_0); super.writeTo(out); out.writeString(repositoryName); out.writeString(containerPath);
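Two mechanical migrations repeat through these testkit actions: readList/writeList become readCollectionAsList/writeCollection, and wire-version gates move from constants on TransportVersion to the TransportVersions holder. A hedged sketch of the combined pattern; the SketchRequest class and its fields are illustrative, not part of this PR:

import java.io.IOException;
import java.util.List;

import org.elasticsearch.TransportVersions;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

class SketchRequest extends ActionRequest {
    private final List<DiscoveryNode> nodes;
    private final boolean abortWrite;

    SketchRequest(StreamInput in) throws IOException {
        super(in);
        nodes = in.readCollectionAsList(DiscoveryNode::new); // was in.readList(DiscoveryNode::new)
        if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) { // was TransportVersion.V_7_14_0
            abortWrite = in.readBoolean();
        } else {
            abortWrite = false; // the field does not exist on older wire formats
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeCollection(nodes); // was out.writeList(nodes)
        if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) {
            out.writeBoolean(abortWrite);
        }
    }

    @Override
    public ActionRequestValidationException validate() {
        return null;
    }
}

diff --git a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java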
b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java index 42238350cea58..88ee149919874 100644 --- a/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java +++ b/x-pack/plugin/snapshot-repo-test-kit/src/main/java/org/elasticsearch/repositories/blobstore/testkit/RepositoryAnalyzeAction.java @@ -10,7 +10,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.ExceptionsHelper; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionListenerResponseHandler; @@ -802,7 +802,7 @@ public Request(StreamInput in) throws IOException { maxTotalDataSize = ByteSizeValue.readFrom(in); detailed = in.readBoolean(); reroutedFrom = in.readOptionalWriteable(DiscoveryNode::new); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_14_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) { abortWritePermitted = in.readBoolean(); } else { abortWritePermitted = false; @@ -829,7 +829,7 @@ public void writeTo(StreamOutput out) throws IOException { maxTotalDataSize.writeTo(out); out.writeBoolean(detailed); out.writeOptionalWriteable(reroutedFrom); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_14_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) { out.writeBoolean(abortWritePermitted); } else if (abortWritePermitted) { throw new IllegalStateException( @@ -1082,7 +1082,7 @@ public Response(StreamInput in) throws IOException { rareActionProbability = in.readDouble(); blobPath = in.readString(); summary = new RepositoryPerformanceSummary(in); - blobResponses = in.readList(BlobAnalyzeAction.Response::new); + blobResponses = in.readCollectionAsList(BlobAnalyzeAction.Response::new); listingTimeNanos = in.readVLong(); deleteTimeNanos = in.readVLong(); } @@ -1102,7 +1102,7 @@ public void writeTo(StreamOutput out) throws IOException { out.writeDouble(rareActionProbability); out.writeString(blobPath); summary.writeTo(out); - out.writeList(blobResponses); + out.writeCollection(blobResponses); out.writeVLong(listingTimeNanos); out.writeVLong(deleteTimeNanos); } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialPlugin.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialPlugin.java index 4ef57e34422b6..22f8ae8969ec7 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialPlugin.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/SpatialPlugin.java @@ -44,6 +44,7 @@ import org.elasticsearch.xpack.spatial.action.SpatialInfoTransportAction; import org.elasticsearch.xpack.spatial.action.SpatialStatsTransportAction; import org.elasticsearch.xpack.spatial.action.SpatialUsageTransportAction; +import org.elasticsearch.xpack.spatial.common.CartesianBoundingBox; import org.elasticsearch.xpack.spatial.index.mapper.GeoShapeWithDocValuesFieldMapper; import org.elasticsearch.xpack.spatial.index.mapper.PointFieldMapper; import org.elasticsearch.xpack.spatial.index.mapper.ShapeFieldMapper; @@ -191,6 +192,11 @@ public Map getProcessors(Processor.Parameters paramet return Map.of(CircleProcessor.TYPE, new CircleProcessor.Factory(), GeoGridProcessor.TYPE, new GeoGridProcessor.Factory()); } + 
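// The getGenericNamedWriteables hook added below registers Writeables under the generic
// category (each GenericNamedWriteableSpec pairs a name with a reader); SearchModule collects
// them under GenericNamedWriteable, which the new SpatialPluginTests#testGenericNamedWriteables
// further down in this diff asserts for both GeoBoundingBox and CartesianBoundingBox.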
@Override + public List getGenericNamedWriteables() { + return List.of(new GenericNamedWriteableSpec(CartesianBoundingBox.class.getSimpleName(), CartesianBoundingBox::new)); + } + private static void registerGeoShapeBoundsAggregator(ValuesSourceRegistry.Builder builder) { builder.register( GeoBoundsAggregationBuilder.REGISTRY_KEY, diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/common/CartesianBoundingBox.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/common/CartesianBoundingBox.java index 8aeef2fbdc587..7bc2f8d06c945 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/common/CartesianBoundingBox.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/common/CartesianBoundingBox.java @@ -8,6 +8,8 @@ package org.elasticsearch.xpack.spatial.common; import org.elasticsearch.ElasticsearchParseException; +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.geo.BoundingBox; import org.elasticsearch.common.geo.GeoUtils; import org.elasticsearch.common.geo.SpatialPoint; @@ -56,6 +58,16 @@ public void writeTo(StreamOutput out) throws IOException { out.writeDouble(bottomRight.getY()); } + @Override + public final String getWriteableName() { + return "CartesianBoundingBox"; + } + + @Override + public final TransportVersion getMinimalSupportedVersion() { + return TransportVersions.V_8_500_070; + } + protected static class CartesianBoundsParser extends BoundsParser { CartesianBoundsParser(XContentParser parser) { super(parser); diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/GeoGridQueryBuilder.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/GeoGridQueryBuilder.java index 7fb42dd024b14..843842cf863c7 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/GeoGridQueryBuilder.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/GeoGridQueryBuilder.java @@ -13,6 +13,7 @@ import org.apache.lucene.search.Query; import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.geo.ShapeRelation; import org.elasticsearch.common.io.stream.StreamInput; @@ -398,6 +399,6 @@ public String getWriteableName() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_3_0; + return TransportVersions.V_8_3_0; } } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilder.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilder.java index 53b59d17e284f..c786b47e2a9a1 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilder.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/index/query/ShapeQueryBuilder.java @@ -9,6 +9,7 @@ import org.apache.lucene.search.ConstantScoreQuery; import org.apache.lucene.search.Query; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.geo.GeometryParser; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -175,6 +176,6 @@ public static ShapeQueryBuilder 
fromXContent(XContentParser parser) throws IOExc @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_4_0; + return TransportVersions.V_7_4_0; } } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregationBuilder.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregationBuilder.java index 114c67b82e73f..e232ec9e463d9 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregationBuilder.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/GeoLineAggregationBuilder.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.spatial.search.aggregations; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -211,6 +212,6 @@ public String getType() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_7_11_0; + return TransportVersions.V_7_11_0; } } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java index e8a210549d0b7..0de11109e33e7 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/InternalGeoLine.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.spatial.search.aggregations; import org.apache.lucene.geo.GeoEncodingUtils; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationReduceContext; @@ -88,7 +88,7 @@ public InternalGeoLine(StreamInput in) throws IOException { this.includeSorts = in.readBoolean(); this.sortOrder = SortOrder.readFromStream(in); this.size = in.readVInt(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { nonOverlapping = in.readBoolean(); simplified = in.readBoolean(); } else { @@ -105,7 +105,7 @@ protected void doWriteTo(StreamOutput out) throws IOException { out.writeBoolean(includeSorts); sortOrder.writeTo(out); out.writeVInt(size); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_500_020)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_500_020)) { out.writeBoolean(nonOverlapping); out.writeBoolean(simplified); } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregationBuilder.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregationBuilder.java index ad5a4371114f3..b3aa1f5e440d8 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregationBuilder.java +++ 
b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/bucket/geogrid/GeoHexGridAggregationBuilder.java @@ -9,6 +9,7 @@ import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.geo.GeoBoundingBox; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.xcontent.support.XContentMapValues; @@ -124,6 +125,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_1_0; + return TransportVersions.V_8_1_0; } } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsAggregationBuilder.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsAggregationBuilder.java index 5d2e63b8986c6..a7385b30ea165 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsAggregationBuilder.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianBoundsAggregationBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.spatial.search.aggregations.metrics; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -105,6 +106,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_6_0; + return TransportVersions.V_8_6_0; } } diff --git a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianCentroidAggregationBuilder.java b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianCentroidAggregationBuilder.java index 80191803ff172..dbd8c41af671b 100644 --- a/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianCentroidAggregationBuilder.java +++ b/x-pack/plugin/spatial/src/main/java/org/elasticsearch/xpack/spatial/search/aggregations/metrics/CartesianCentroidAggregationBuilder.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.spatial.search.aggregations.metrics; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.search.aggregations.AggregationBuilder; @@ -112,6 +113,6 @@ protected ValuesSourceRegistry.RegistryKey getRegistryKey() { @Override public TransportVersion getMinimalSupportedVersion() { - return TransportVersion.V_8_6_0; + return TransportVersions.V_8_6_0; } } diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/SpatialPluginTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/SpatialPluginTests.java index 6c1ba3100244c..8129b26c28241 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/SpatialPluginTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/SpatialPluginTests.java @@ -7,11 +7,14 @@ package 
org.elasticsearch.xpack.spatial; import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.common.io.stream.GenericNamedWriteable; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.license.License; import org.elasticsearch.license.TestUtils; import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.license.internal.XPackLicenseStatus; import org.elasticsearch.plugins.SearchPlugin; +import org.elasticsearch.search.SearchModule; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.CardinalityUpperBound; import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder; @@ -33,7 +36,9 @@ import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.Set; import java.util.function.Consumer; +import java.util.stream.Collectors; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -129,6 +134,16 @@ public void testCartesianShapeBoundsLicenseCheck() { }, "cartesian_bounds", "shape"); } + public void testGenericNamedWriteables() { + SearchModule module = new SearchModule(Settings.EMPTY, List.of(new SpatialPlugin())); + Set names = module.getNamedWriteables() + .stream() + .filter(e -> e.categoryClass.equals(GenericNamedWriteable.class)) + .map(e -> e.name) + .collect(Collectors.toSet()); + assertThat("Expect both Geo and Cartesian BoundingBox", names, equalTo(Set.of("GeoBoundingBox", "CartesianBoundingBox"))); + } + private SpatialPlugin getPluginWithOperationMode(License.OperationMode operationMode) { return new SpatialPlugin() { protected XPackLicenseState getLicenseState() { diff --git a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/LatLonGeometryRelationVisitorTests.java b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/LatLonGeometryRelationVisitorTests.java index 0e5d8e9a69a35..18f33ec0d6845 100644 --- a/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/LatLonGeometryRelationVisitorTests.java +++ b/x-pack/plugin/spatial/src/test/java/org/elasticsearch/xpack/spatial/index/fielddata/LatLonGeometryRelationVisitorTests.java @@ -17,9 +17,15 @@ import org.elasticsearch.common.geo.GeometryNormalizer; import org.elasticsearch.common.geo.Orientation; import org.elasticsearch.geo.GeometryTestUtils; +import org.elasticsearch.geometry.Circle; import org.elasticsearch.geometry.Geometry; +import org.elasticsearch.geometry.GeometryCollection; +import org.elasticsearch.geometry.GeometryVisitor; import org.elasticsearch.geometry.LinearRing; +import org.elasticsearch.geometry.MultiLine; import org.elasticsearch.geometry.MultiPoint; +import org.elasticsearch.geometry.MultiPolygon; +import org.elasticsearch.geometry.Rectangle; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.spatial.util.GeoTestUtils; @@ -135,6 +141,18 @@ public void testContainedPolygons() throws Exception { } } + /** Explicitly test failure found in #98063 */ + public void testOriginPointInMultipoint() throws Exception { + ArrayList points = new ArrayList<>(); + points.add(new org.elasticsearch.geometry.Point(0.0, 0.0)); + points.add(new org.elasticsearch.geometry.Point(0.0, 0.0)); + points.add(new org.elasticsearch.geometry.Point(0.0, 1.401298464324817E-45)); + Geometry geometry = new MultiPoint(points); + GeoShapeValues.GeoShapeValue geoShapeValue = 
GeoTestUtils.geoShapeValue(geometry); + GeometryDocValueReader reader = GeoTestUtils.geometryDocValueReader(geometry, CoordinateEncoder.GEO); + doTestShape(geometry, geoShapeValue, reader, new Point(0, 0)); + } + private void doTestShapes(Supplier supplier) throws Exception { Geometry geometry = GeometryNormalizer.apply(Orientation.CCW, GeometryTestUtils.randomGeometryWithoutCircle(0, false)); GeoShapeValues.GeoShapeValue geoShapeValue = GeoTestUtils.geoShapeValue(geometry); @@ -161,18 +179,7 @@ private void doTestShape(Geometry geometry, GeometryDocValueReader reader, LatLo private boolean isIdenticalPoint(Geometry geometry, LatLonGeometry latLonGeometry) { if (latLonGeometry instanceof Point latLonPoint) { - if (geometry instanceof org.elasticsearch.geometry.Point point) { - return encodeLatitude(point.getLat()) == encodeLatitude(latLonPoint.getLat()) - && encodeLongitude(point.getLon()) == encodeLongitude(latLonPoint.getLon()); - } else if (geometry instanceof org.elasticsearch.geometry.Line line) { - for (int i = 0; i < line.length(); i++) { - if (encodeLatitude(line.getLat(i)) != encodeLatitude(latLonPoint.getLat()) - || encodeLongitude(line.getLon(i)) != encodeLongitude(latLonPoint.getLon())) { - return false; - } - } - return true; - } + return geometry.visit(new TestIdenticalPointVisitor(latLonPoint)); } return false; } @@ -280,4 +287,113 @@ private double quantizeLat(double lat) { private double quantizeLon(double lon) { return decodeLongitude(encodeLongitude(lon)); } + + /** + * This visitor returns false if any point in the geometry is not identical to the provided point. + * Identical means that the encoded lat and lon values are the same. + */ + private static class TestIdenticalPointVisitor implements GeometryVisitor<Boolean, RuntimeException> { + private final int encodedLat; + private final int encodedLon; + + private TestIdenticalPointVisitor(Point latLonPoint) { + encodedLat = encodeLatitude(latLonPoint.getLat()); + encodedLon = encodeLongitude(latLonPoint.getLon()); + } + + private boolean isIdenticalPoint(double lat, double lon) { + return encodeLatitude(lat) == encodedLat && encodeLongitude(lon) == encodedLon; + } + + @Override + public Boolean visit(Circle circle) { + if (circle.getRadiusMeters() == 0) { + return isIdenticalPoint(circle.getLat(), circle.getLon()); + } + return false; + } + + @Override + public Boolean visit(GeometryCollection<?> collection) { + for (Geometry shape : collection) { + if (shape.visit(this) == false) { + return false; + } + } + return collection.size() > 0; + } + + @Override + public Boolean visit(org.elasticsearch.geometry.Line line) { + for (int i = 0; i < line.length(); i++) { + if (isIdenticalPoint(line.getLat(i), line.getLon(i)) == false) { + return false; + } + } + return line.length() > 0; + } + + @Override + public Boolean visit(LinearRing ring) { + return visit((org.elasticsearch.geometry.Line) ring); + } + + @Override + public Boolean visit(MultiLine multiLine) { + for (org.elasticsearch.geometry.Line line : multiLine) { + if (visit(line) == false) { + return false; + } + } + return multiLine.size() > 0; + } + + @Override + public Boolean visit(MultiPoint multiPoint) { + for (org.elasticsearch.geometry.Point point : multiPoint) { + if (visit(point) == false) { + return false; + } + } + return multiPoint.size() > 0; + } + + @Override + public Boolean visit(MultiPolygon multiPolygon) { + for (org.elasticsearch.geometry.Polygon polygon : multiPolygon) { + if (visit(polygon) == false) { + return false; + } + } + return multiPolygon.size() > 0; + } + + @Override
+ public Boolean visit(org.elasticsearch.geometry.Point point) { + return isIdenticalPoint(point.getLat(), point.getLon()); + } + + @Override + public Boolean visit(org.elasticsearch.geometry.Polygon polygon) { + if (visit(polygon.getPolygon()) == false) { + return false; + } + for (int i = 0; i < polygon.getNumberOfHoles(); i++) { + LinearRing hole = polygon.getHole(i); + if (visit(hole) == false) { + return false; + } + } + return polygon.getPolygon().length() > 0; + } + + @Override + public Boolean visit(Rectangle rectangle) { + int eMinX = encodeLongitude(rectangle.getMinX()); + int eMaxX = encodeLongitude(rectangle.getMaxX()); + int eMinY = encodeLatitude(rectangle.getMinY()); + int eMaxY = encodeLatitude(rectangle.getMaxY()); + return eMinX == eMaxX && eMinY == eMaxY && isIdenticalPoint(rectangle.getMinLat(), rectangle.getMinLon()); + } + } } diff --git a/x-pack/plugin/sql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/mixed_node/SqlCompatIT.java b/x-pack/plugin/sql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/mixed_node/SqlCompatIT.java index 1ca89f87d7345..5bc7e0dd219ca 100644 --- a/x-pack/plugin/sql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/mixed_node/SqlCompatIT.java +++ b/x-pack/plugin/sql/qa/mixed-node/src/javaRestTest/java/org/elasticsearch/xpack/sql/qa/mixed_node/SqlCompatIT.java @@ -9,6 +9,7 @@ import org.apache.http.HttpHost; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.client.Request; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -129,7 +130,7 @@ public static String sqlQueryEntityWithOptionalMode(Map fields) } public void testHistoricCursorFromOldNodeFailsOnNewNode() throws IOException { - assumeTrue("BwC checks only enabled for <=8.7.0", bwcVersion.before(TransportVersion.V_8_8_0)); + assumeTrue("BwC checks only enabled for <=8.7.0", bwcVersion.before(TransportVersions.V_8_8_0)); assertCursorNotCompatibleAcrossVersions(bwcVersion, oldNodesClient, TransportVersion.current(), newNodesClient); } diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlQueryRequest.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlQueryRequest.java index 2caa14aa40295..e4023a89d31ac 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlQueryRequest.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/AbstractSqlQueryRequest.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.sql.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.CompositeIndicesRequest; import org.elasticsearch.common.Strings; @@ -427,16 +427,16 @@ public AbstractSqlQueryRequest runtimeMappings(Map runtimeMappin public AbstractSqlQueryRequest(StreamInput in) throws IOException { super(in); query = in.readString(); - params = in.readList(AbstractSqlQueryRequest::readSqlTypedParamValue); + params = in.readCollectionAsList(AbstractSqlQueryRequest::readSqlTypedParamValue); zoneId = in.readZoneId(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { catalog = in.readOptionalString(); } fetchSize = in.readVInt(); requestTimeout = 
in.readTimeValue(); pageTimeout = in.readTimeValue(); filter = in.readOptionalNamedWriteable(QueryBuilder.class); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_13_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_13_0)) { runtimeMappings = in.readMap(); } } @@ -457,14 +457,14 @@ public void writeTo(StreamOutput out) throws IOException { out.writeString(query); out.writeCollection(params, AbstractSqlQueryRequest::writeSqlTypedParamValue); out.writeZoneId(zoneId); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_16_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_16_0)) { out.writeOptionalString(catalog); } out.writeVInt(fetchSize); out.writeTimeValue(requestTimeout); out.writeTimeValue(pageTimeout); out.writeOptionalNamedWriteable(filter); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_13_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_13_0)) { out.writeGenericMap(runtimeMappings); } } diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java index 5369be6f2c0f5..f24d1ba800987 100644 --- a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryRequest.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.sql.action; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -160,12 +160,12 @@ public SqlQueryRequest(StreamInput in) throws IOException { fieldMultiValueLeniency = in.readBoolean(); indexIncludeFrozen = in.readBoolean(); binaryCommunication = in.readOptionalBoolean(); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_14_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) { this.waitForCompletionTimeout = in.readOptionalTimeValue(); this.keepOnCompletion = in.readBoolean(); this.keepAlive = in.readOptionalTimeValue(); } - allowPartialSearchResults = in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0) && in.readBoolean(); + allowPartialSearchResults = in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0) && in.readBoolean(); } /** @@ -294,12 +294,12 @@ public void writeTo(StreamOutput out) throws IOException { out.writeBoolean(fieldMultiValueLeniency); out.writeBoolean(indexIncludeFrozen); out.writeOptionalBoolean(binaryCommunication); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_7_14_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) { out.writeOptionalTimeValue(waitForCompletionTimeout); out.writeBoolean(keepOnCompletion); out.writeOptionalTimeValue(keepAlive); } - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { out.writeBoolean(allowPartialSearchResults); } } diff --git a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java index 9ea0e4b0e889a..637b554847dfc 100644 --- 
a/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java +++ b/x-pack/plugin/sql/sql-action/src/main/java/org/elasticsearch/xpack/sql/action/SqlQueryResponse.java @@ -8,7 +8,7 @@ import com.fasterxml.jackson.core.JsonGenerator; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; @@ -83,7 +83,7 @@ public SqlQueryResponse(StreamInput in) throws IOException { } } this.rows = unmodifiableList(rows); - if (in.getTransportVersion().onOrAfter(TransportVersion.V_7_14_0)) { + if (in.getTransportVersion().onOrAfter(TransportVersions.V_7_14_0)) { columnar = in.readBoolean(); asyncExecutionId = in.readOptionalString(); isPartial = in.readBoolean(); diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/common/io/SqlStreamInput.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/common/io/SqlStreamInput.java index 2eae1dab13135..456067fba6b04 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/common/io/SqlStreamInput.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/common/io/SqlStreamInput.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.sql.common.io; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.compress.CompressorFactory; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; @@ -43,7 +44,7 @@ public static SqlStreamInput fromString(String base64encoded, NamedWriteableRegi * using TransportVersion. */ private static void validateStreamVersion(TransportVersion version, TransportVersion cursorVersion) { - if (cursorVersion.before(TransportVersion.V_8_8_0) && version.equals(cursorVersion) == false) { + if (cursorVersion.before(TransportVersions.V_8_8_0) && version.equals(cursorVersion) == false) { throw new SqlIllegalArgumentException("Unsupported cursor version [{}], expected [{}]", cursorVersion, version); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggCursor.java index cc3814970493f..b337bcbf6af3f 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggCursor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/CompositeAggCursor.java @@ -80,7 +80,7 @@ public CompositeAggCursor(StreamInput in) throws IOException { nextQuery = new SearchSourceBuilder(in); limit = in.readVInt(); - extractors = in.readNamedWriteableList(BucketExtractor.class); + extractors = in.readNamedWriteableCollectionAsList(BucketExtractor.class); mask = BitSet.valueOf(in.readByteArray()); includeFrozen = in.readBoolean(); } @@ -91,7 +91,7 @@ public void writeTo(StreamOutput out) throws IOException { nextQuery.writeTo(out); out.writeVInt(limit); - out.writeNamedWriteableList(extractors); + out.writeNamedWriteableCollection(extractors); out.writeByteArray(mask.toByteArray()); out.writeBoolean(includeFrozen); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitCursor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitCursor.java index 
cf85681f58473..36a42aaad7161 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitCursor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/execution/search/SearchHitCursor.java @@ -8,7 +8,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.search.SearchRequest; import org.elasticsearch.action.search.SearchResponse; @@ -66,10 +66,10 @@ public SearchHitCursor(StreamInput in) throws IOException { nextQuery = new SearchSourceBuilder(in); limit = in.readVInt(); - extractors = in.readNamedWriteableList(HitExtractor.class); + extractors = in.readNamedWriteableCollectionAsList(HitExtractor.class); mask = BitSet.valueOf(in.readByteArray()); includeFrozen = in.readBoolean(); - allowPartialSearchResults = in.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0) && in.readBoolean(); + allowPartialSearchResults = in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0) && in.readBoolean(); } @Override @@ -77,10 +77,10 @@ public void writeTo(StreamOutput out) throws IOException { nextQuery.writeTo(out); out.writeVInt(limit); - out.writeNamedWriteableList(extractors); + out.writeNamedWriteableCollection(extractors); out.writeByteArray(mask.toByteArray()); out.writeBoolean(includeFrozen); - if (out.getTransportVersion().onOrAfter(TransportVersion.V_8_3_0)) { + if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) { out.writeBoolean(allowPartialSearchResults); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/CaseProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/CaseProcessor.java index 6a73281be17e0..46a10617a8fe7 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/CaseProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/CaseProcessor.java @@ -25,7 +25,7 @@ public CaseProcessor(List processors) { } public CaseProcessor(StreamInput in) throws IOException { - processors = in.readNamedWriteableList(Processor.class); + processors = in.readNamedWriteableCollectionAsList(Processor.class); } @Override @@ -35,7 +35,7 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeNamedWriteableList(processors); + out.writeNamedWriteableCollection(processors); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ConditionalProcessor.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ConditionalProcessor.java index a4e82b739eb1e..35e2b738c9fb1 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ConditionalProcessor.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/expression/predicate/conditional/ConditionalProcessor.java @@ -59,7 +59,7 @@ public ConditionalProcessor(List processors, ConditionalOperation ope } public ConditionalProcessor(StreamInput in) throws IOException { - processors = in.readNamedWriteableList(Processor.class); + processors = in.readNamedWriteableCollectionAsList(Processor.class); operation = in.readEnum(ConditionalOperation.class); } 
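The SQL request, response, and cursor hunks above all follow the same wire-compatibility idiom: a field that first shipped in a given release is read and written only when the negotiated transport version is at least the version that introduced it, and the read and write sides must gate on the same constant from the TransportVersions holder class this PR migrates to. A minimal sketch of the idiom, assuming a hypothetical ExampleRequest class and newFlag field that are not part of this change:

import org.elasticsearch.TransportVersions;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;

// Illustrative only: shows the read/write symmetry used by the SQL requests and cursors.
public class ExampleRequest implements Writeable {
    private final String query;    // serialized by every version
    private final boolean newFlag; // introduced in 8.3, so it must be gated

    public ExampleRequest(StreamInput in) throws IOException {
        query = in.readString();
        // Older peers never wrote the flag, so short-circuit to the default instead of reading it.
        newFlag = in.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0) && in.readBoolean();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(query);
        // Only write the flag when the receiving side is new enough to read it back.
        if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_3_0)) {
            out.writeBoolean(newFlag);
        }
    }
}

An asymmetric gate (writing unconditionally but reading conditionally, or gating the two sides on different constants) mis-aligns the rest of the stream, which is why each hunk changes both methods together.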
@@ -70,7 +70,7 @@ public String getWriteableName() { @Override public void writeTo(StreamOutput out) throws IOException { - out.writeNamedWriteableList(processors); + out.writeNamedWriteableCollection(processors); out.writeEnum(operation); } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowColumns.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowColumns.java index 17dd20ec6aca1..516aaa961b5b3 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowColumns.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/ShowColumns.java @@ -12,6 +12,7 @@ import org.elasticsearch.xpack.ql.expression.FieldAttribute; import org.elasticsearch.xpack.ql.expression.predicate.regex.LikePattern; import org.elasticsearch.xpack.ql.index.IndexCompatibility; +import org.elasticsearch.xpack.ql.index.IndexResolver; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; @@ -80,15 +81,22 @@ public void execute(SqlSession session, ActionListener listener) { idx = hasText(cat) && cat.equals(cluster) == false ? buildRemoteIndexName(cat, idx) : idx; boolean withFrozen = includeFrozen || session.configuration().includeFrozen(); - session.indexResolver().resolveAsMergedMapping(idx, withFrozen, emptyMap(), listener.delegateFailureAndWrap((l, indexResult) -> { - List<List<?>> rows = emptyList(); - if (indexResult.isValid()) { - rows = new ArrayList<>(); - Version version = Version.fromId(session.configuration().version().id); - fillInRows(IndexCompatibility.compatible(indexResult, version).get().mapping(), null, rows); - } - l.onResponse(of(session, rows)); - })); + session.indexResolver() + .resolveAsMergedMapping( + idx, + IndexResolver.ALL_FIELDS, + withFrozen, + emptyMap(), + listener.delegateFailureAndWrap((l, indexResult) -> { + List<List<?>> rows = emptyList(); + if (indexResult.isValid()) { + rows = new ArrayList<>(); + Version version = Version.fromId(session.configuration().version().id); + fillInRows(IndexCompatibility.compatible(indexResult, version).get().mapping(), null, rows); + } + l.onResponse(of(session, rows)); + }) + ); } static void fillInRows(Map<String, EsField> mapping, String prefix, List<List<?>> rows) { diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java index 5dbffcfb8bab6..9f57df5190dcc 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumns.java @@ -14,6 +14,7 @@ import org.elasticsearch.xpack.ql.expression.predicate.regex.LikePattern; import org.elasticsearch.xpack.ql.index.EsIndex; import org.elasticsearch.xpack.ql.index.IndexCompatibility; +import org.elasticsearch.xpack.ql.index.IndexResolver; import org.elasticsearch.xpack.ql.tree.NodeInfo; import org.elasticsearch.xpack.ql.tree.Source; import org.elasticsearch.xpack.ql.type.DataType; @@ -177,22 +178,28 @@ public void execute(SqlSession session, ActionListener listener) { // otherwise use a merged mapping else { session.indexResolver() - .resolveAsMergedMapping(indexPattern, includeFrozen, emptyMap(), listener.delegateFailureAndWrap((delegate, r) -> { - List<List<?>> rows = new ArrayList<>(); - // populate the
data only when a target is found - if (r.isValid()) { - fillInRows( - tableCat, - indexName, - IndexCompatibility.compatible(r, version).get().mapping(), - null, - rows, - columnMatcher, - mode - ); - } - delegate.onResponse(ListCursor.of(Rows.schema(output), rows, session.configuration().pageSize())); - })); + .resolveAsMergedMapping( + indexPattern, + IndexResolver.ALL_FIELDS, + includeFrozen, + emptyMap(), + listener.delegateFailureAndWrap((delegate, r) -> { + List<List<?>> rows = new ArrayList<>(); + // populate the data only when a target is found + if (r.isValid()) { + fillInRows( + tableCat, + indexName, + IndexCompatibility.compatible(r, version).get().mapping(), + null, + rows, + columnMatcher, + mode + ); + } + delegate.onResponse(ListCursor.of(Rows.schema(output), rows, session.configuration().pageSize())); + }) + ); } } diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsResponse.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsResponse.java index 35cb0e24faf69..335b17555242c 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsResponse.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/plugin/SqlStatsResponse.java @@ -33,12 +33,12 @@ public SqlStatsResponse(ClusterName clusterName, List nodes, @Override protected List<NodeStatsResponse> readNodesFrom(StreamInput in) throws IOException { - return in.readList(NodeStatsResponse::readNodeResponse); + return in.readCollectionAsList(NodeStatsResponse::readNodeResponse); } @Override protected void writeNodesTo(StreamOutput out, List<NodeStatsResponse> nodes) throws IOException { - out.writeList(nodes); + out.writeCollection(nodes); } @Override diff --git a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java index e7c254ab9c826..040807981a389 100644 --- a/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java +++ b/x-pack/plugin/sql/src/main/java/org/elasticsearch/xpack/sql/session/SqlSession.java @@ -164,6 +164,7 @@ private void preAnalyze(LogicalPlan parsed, Function act boolean includeFrozen = configuration.includeFrozen() || tableInfo.isFrozen(); indexResolver.resolveAsMergedMapping( indexPattern, + IndexResolver.ALL_FIELDS, includeFrozen, configuration.runtimeMappings(), listener.delegateFailureAndWrap((l, indexResult) -> l.onResponse(action.apply(indexResult))) diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/common/io/SqlStreamTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/common/io/SqlStreamTests.java index b46897e97804f..b5f23f6ab7abb 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/common/io/SqlStreamTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/common/io/SqlStreamTests.java @@ -9,6 +9,7 @@ import org.apache.lucene.util.BytesRef; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.common.io.stream.InputStreamStreamInput; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.test.ESTestCase; @@ -68,7 +69,7 @@ public void testOldCursorProducesVersionMismatchError() { } public void testVersionCanBeReadByOldNodes() throws IOException { - TransportVersion version = randomFrom(TransportVersion.V_7_0_0, TransportVersion.V_7_2_1, TransportVersion.V_8_1_0); + TransportVersion version =
randomFrom(TransportVersions.V_7_0_0, TransportVersions.V_7_2_1, TransportVersions.V_8_1_0); SqlStreamOutput out = SqlStreamOutput.create(version, randomZone()); out.writeString("payload"); out.close(); diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java index d6807689ec23e..e0af40bbc4ce5 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plan/logical/command/sys/SysColumnsTests.java @@ -52,6 +52,7 @@ import static org.elasticsearch.xpack.sql.types.SqlTypesTests.loadMapping; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -397,9 +398,9 @@ private Tuple sql( when(resolver.clusterName()).thenReturn(CLUSTER_NAME); when(resolver.remoteClusters()).thenReturn(Set.of(CLUSTER_NAME)); doAnswer(invocation -> { - ((ActionListener<IndexResolution>) invocation.getArguments()[3]).onResponse(IndexResolution.valid(test)); + ((ActionListener<IndexResolution>) invocation.getArguments()[4]).onResponse(IndexResolution.valid(test)); return Void.TYPE; - }).when(resolver).resolveAsMergedMapping(any(), anyBoolean(), any(), any()); + }).when(resolver).resolveAsMergedMapping(any(), eq(IndexResolver.ALL_FIELDS), anyBoolean(), any(), any()); doAnswer(invocation -> { ((ActionListener<List<EsIndex>>) invocation.getArguments()[4]).onResponse(singletonList(test)); return Void.TYPE; diff --git a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CursorTests.java b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CursorTests.java index 04149801266d0..726b40616e2d4 100644 --- a/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CursorTests.java +++ b/x-pack/plugin/sql/src/test/java/org/elasticsearch/xpack/sql/plugin/CursorTests.java @@ -7,6 +7,7 @@ package org.elasticsearch.xpack.sql.plugin; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.client.internal.Client; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -56,7 +57,7 @@ public void testHistoricVersionHandling() { TransportVersion otherVersion = TransportVersionUtils.randomVersionBetween( random(), TransportVersionUtils.getFirstVersion(), - TransportVersion.V_8_7_0 + TransportVersions.V_8_7_0 ); String encodedWithWrongVersion = encodeToString(cursor, otherVersion, randomZone()); @@ -126,7 +127,7 @@ public void testAttachingFormatterToCursorFromOtherVersion() { TransportVersion version = TransportVersionUtils.randomVersionBetween( random(), TransportVersionUtils.getFirstVersion(), - TransportVersion.V_8_7_0 + TransportVersions.V_8_7_0 ); String encoded = encodeToString(cursor, version, zone); diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/graph/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/graph/10_basic.yml index 29e67b3f53479..b001087f9f775 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/graph/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/graph/10_basic.yml @@ -7,7 +7,7 @@ setup:
settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: keys: @@ -33,7 +33,8 @@ setup: - do: cluster.health: index: test_1 - wait_for_status: green + wait_for_status: yellow + wait_for_no_initializing_shards: true - do: graph.explore: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/search-business-rules/10_pinned_query.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/search-business-rules/10_pinned_query.yml index 82ac335e9a1e0..5726d75422e21 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/search-business-rules/10_pinned_query.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/search-business-rules/10_pinned_query.yml @@ -78,3 +78,49 @@ setup: - match: { hits.hits.2._index: "test1" } - match: { hits.hits.3._id: "b" } +--- +"Test pinned query with docs": + - skip: + version: " - 8.10.99" + reason: "'_index' was made optional in 8.11" + - do: + search: + index: test1,test2 + body: + query: + pinned: + docs: + - { _id: a } + - { _id: c, _index: test2 } + organic: + match: + title: + query: "title" + + - match: { hits.total.value: 4 } + + - match: { hits.hits.0._id: "a" } + - match: { hits.hits.1._id: "a" } + - match: { hits.hits.2._id: "c" } + - match: { hits.hits.2._index: "test2" } + - match: { hits.hits.3._id: "b" } + +--- +"Test pinned query with docs and no index fail in previous versions": + - skip: + version: "8.10.99 - " + reason: "'_index' was made optional in 8.11, this test is for explicit failure tests" + - do: + catch: bad_request + search: + index: test1,test2 + body: + query: + pinned: + docs: + - { _id: a } + - { _id: c, _index: test2 } + organic: + match: + title: + query: "title" diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/10_index_doc.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/10_index_doc.yml index dab154917b17b..8a3de7cc4b855 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/10_index_doc.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/10_index_doc.yml @@ -37,7 +37,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: @@ -50,7 +50,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: @@ -62,7 +62,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: @@ -75,7 +75,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: @@ -235,7 +235,6 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: - refresh: true body: - '{"index": {"_index": "only_read", "_id": "13"}}' - '{"name": "doc13"}' @@ -246,6 +245,10 @@ teardown: - match: { items.0.index.error.type: "security_exception" } - match: { items.1.index.status: 201 } + - do: # superuser + indices.refresh: + index: only_index + - do: # superuser search: rest_total_hits_as_int: true diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/11_delete_doc.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/11_delete_doc.yml index 5c19aa3bbfcad..094dc6fa01097 100644 --- 
a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/11_delete_doc.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/11_delete_doc.yml @@ -37,7 +37,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: @@ -50,7 +50,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: @@ -62,7 +62,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: @@ -75,7 +75,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: @@ -138,7 +138,6 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user delete: - refresh: true index: only_delete id: "3" @@ -152,7 +151,6 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: - refresh: true body: - '{"delete": {"_index": "only_delete", "_id": "4"}}' - '{"delete": {"_index": "everything" , "_id": "9"}}' @@ -160,7 +158,6 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: - refresh: true body: # The rest test won't send streaming content unless it has multiple bodies, so we send the same delete twice - '{"delete": {"_index": "only_delete", "_id": "5"}}' - '{"delete": {"_index": "only_delete", "_id": "5"}}' @@ -177,6 +174,10 @@ teardown: _index: everything _id: "10" + - do: # superuser + indices.refresh: + index: only_delete + - do: # superuser search: rest_total_hits_as_int: true @@ -197,7 +198,6 @@ teardown: catch: forbidden headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user delete: - refresh: true index: only_read id: "1" @@ -205,14 +205,12 @@ teardown: catch: forbidden headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user delete: - refresh: true index: only_index id: "2" - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: - refresh: true body: - '{"delete": {"_index": "only_read" , "_id": "1"}}' - '{"delete": {"_index": "only_index", "_id": "2"}}' @@ -226,7 +224,6 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: - refresh: true body: # The rest test won't send streaming content unless it has multiple bodies, so we send the same delete twice - '{"delete": {"_index": "only_read" , "_id": "1"}}' - '{"delete": {"_index": "only_read" , "_id": "1"}}' @@ -240,7 +237,6 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: - refresh: true body: # The rest test won't send streaming content unless it has multiple bodies, so we send the same delete twice - '{"delete": {"_index": "only_index", "_id": "2"}}' - '{"delete": {"_index": "only_index", "_id": "2"}}' @@ -251,6 +247,10 @@ teardown: - match: { items.1.delete.status: 403 } - match: { items.1.delete.error.type: "security_exception" } + - do: # superuser + indices.refresh: + index: only_read,only_index + - do: # superuser search: rest_total_hits_as_int: true @@ -270,7 +270,6 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: - refresh: true body: - '{"delete": {"_index": "only_read" , "_id": "1"}}' - '{"delete": 
{"_index": "only_delete", "_id": "6"}}' @@ -279,6 +278,10 @@ teardown: - match: { items.0.delete.error.type: "security_exception" } - match: { items.1.delete.status: 200 } + - do: # superuser + indices.refresh: + index: only_read,only_delete + - do: # superuser search: rest_total_hits_as_int: true @@ -298,7 +301,6 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: - refresh: true body: - '{"index" : {"_index": "only_delete", "_id": "11"}}' - '{"name" : "doc11"}' @@ -314,6 +316,10 @@ teardown: - match: { items.3.delete.status: 403 } - match: { items.3.delete.error.type: "security_exception" } + - do: # superuser + indices.refresh: + index: only_delete,only_index + - do: # superuser search: rest_total_hits_as_int: true diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/12_index_alias.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/12_index_alias.yml index e1901ced2817e..8c0ba52f23236 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/12_index_alias.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/12_index_alias.yml @@ -35,7 +35,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: @@ -48,7 +48,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: @@ -61,7 +61,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: @@ -147,7 +147,6 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: - refresh: true body: - '{"index": {"_index": "can_write_1", "_id": "3"}}' - '{"name": "doc3"}' @@ -157,7 +156,6 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: - refresh: true body: - '{"index": {"_index": "can_write_1", "_id": "5"}}' - '{"name": "doc5"}' @@ -167,7 +165,6 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: - refresh: true body: - '{"index": {"_index": "can_write_1", "_id": "7"}}' - '{"name": "doc7"}' @@ -176,6 +173,10 @@ teardown: - '{"index": {"_index": "can_write_3", "_id": "9"}}' - '{"name": "doc9"}' + - do: # superuser + indices.refresh: + index: write_index_* + - do: # superuser search: rest_total_hits_as_int: true @@ -194,7 +195,6 @@ teardown: catch: forbidden headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user create: - refresh: true id: "7" index: can_read_1 body: > @@ -206,7 +206,6 @@ teardown: catch: forbidden headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user create: - refresh: true id: "8" index: can_read_2 body: > @@ -217,7 +216,6 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: - refresh: true body: - '{"index": {"_index": "can_read_1", "_id": "9"}}' - '{"name": "doc9"}' @@ -232,7 +230,6 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: - refresh: true body: - '{"index": {"_index": "can_read_1", "_id": "11"}}' - '{"name": "doc11"}' @@ -244,6 +241,10 @@ teardown: - match: { items.1.index.status: 403 } - match: { items.1.index.error.type: "security_exception" } + - do: # superuser + 
indices.refresh: + index: read_index + - do: # superuser search: rest_total_hits_as_int: true @@ -255,7 +256,6 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: - refresh: true body: - '{"index": {"_index": "can_read_1", "_id": "13"}}' - '{"name": "doc13"}' @@ -266,6 +266,10 @@ teardown: - match: { items.0.index.error.type: "security_exception" } - match: { items.1.index.status: 201 } + - do: # superuser + indices.refresh: + index: write_index_1 + - do: # superuser search: rest_total_hits_as_int: true @@ -276,7 +280,6 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: - refresh: true body: - '{"index": {"_index": "can_read_1", "_id": "15"}}' - '{"name": "doc15"}' @@ -297,6 +300,10 @@ teardown: - match: { items.3.index.status: 201 } - match: { items.4.index.status: 201 } + - do: # superuser + indices.refresh: + index: write_index_* + - do: # superuser search: rest_total_hits_as_int: true @@ -336,7 +343,6 @@ teardown: - do: headers: { Authorization: "Basic dGVzdF91c2VyOngtcGFjay10ZXN0LXBhc3N3b3Jk" } # test_user bulk: - refresh: true body: - '{"index": {"_index": "can_read_1", "_id": "20"}}' - '{"name": "doc20"}' @@ -363,6 +369,10 @@ teardown: - match: { items.5.update.status: 200 } - match: { items.6.delete.status: 200 } + - do: # superuser + indices.refresh: + index: write_index_* + - do: # superuser search: rest_total_hits_as_int: true diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/14_cat_indices.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/14_cat_indices.yml index 34bb3f58d1901..978cf84983190 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/14_cat_indices.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/14_cat_indices.yml @@ -164,7 +164,7 @@ teardown: - match: $body: | - /^(yellow \s+ + /^((yellow|green) \s+ close \s+ index_to_monitor \s+ ([a-zA-Z0-9=/_+]|[\\\-]){22} \s+ diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/20_get_doc.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/20_get_doc.yml index 6e22bb4b8b43e..771920c4b13f4 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/20_get_doc.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/20_get_doc.yml @@ -38,7 +38,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: @@ -51,7 +51,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: @@ -63,7 +63,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: @@ -75,7 +75,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: @@ -87,7 +87,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/21_search_doc.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/21_search_doc.yml index 7bba2a7617a16..56ade8918efe4 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/21_search_doc.yml +++ 
b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/21_search_doc.yml @@ -38,7 +38,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: @@ -52,7 +52,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: @@ -66,7 +66,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: @@ -80,7 +80,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: @@ -94,7 +94,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 mappings: properties: name: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/30_dynamic_put_mapping.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/30_dynamic_put_mapping.yml index 7b50942478751..921486ba2d220 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/30_dynamic_put_mapping.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/30_dynamic_put_mapping.yml @@ -34,7 +34,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 - do: indices.put_alias: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/31_rollover_using_alias.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/31_rollover_using_alias.yml index 73979883291c3..6d3a014f8b97b 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/31_rollover_using_alias.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/31_rollover_using_alias.yml @@ -35,7 +35,7 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 + number_of_replicas: 1 - do: indices.put_alias: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml index f9a6147533e8e..8044e9bc3b8ab 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/security/authz/70_tsdb.yml @@ -232,7 +232,6 @@ create_doc permission can create: - do: headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user bulk: - refresh: true index: test body: - '{"create": {}}' @@ -242,7 +241,6 @@ create_doc permission can create: - do: headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user index: - refresh: true index: test body: "@timestamp": "2021-04-28T23:51:03.142Z" @@ -257,6 +255,16 @@ create_doc permission can create: rx: 430605511 - match: { _version: 1 } + - do: # superuser + indices.refresh: + index: test + - do: + search: + index: test + # Original 8 docs ("setup") + 2 more (above) + - match: { hits.total.value: 10 } + - match: { hits.total.relation: "eq" } + --- create_doc permission can't overwrite: - skip: @@ -290,7 +298,6 @@ create_doc permission can't overwrite: - do: headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user bulk: - refresh: true index: test body: - '{"index": {}}' @@ -301,7 +308,6 @@ create_doc permission can't overwrite: headers: { Authorization: "Basic 
bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user catch: "/is\ unauthorized\ for\ user\ \\[limited\\]/" index: - refresh: true index: test op_type: index body: @@ -316,6 +322,16 @@ create_doc permission can't overwrite: tx: 111434595272 rx: 430605511 + - do: # superuser + indices.refresh: + index: test + - do: + search: + index: test + # Original 8 docs ("setup") but no more (above) + - match: { hits.total.value: 8 } + - match: { hits.total.relation: "eq" } + --- index permission can create: - skip: @@ -349,7 +365,6 @@ index permission can create: - do: headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user bulk: - refresh: true index: test body: - '{"create": {}}' @@ -359,7 +374,6 @@ index permission can create: - do: headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user index: - refresh: true index: test body: "@timestamp": "2021-04-28T23:51:03.142Z" @@ -374,6 +388,16 @@ index permission can create: rx: 430605511 - match: { _version: 1 } + - do: # superuser + indices.refresh: + index: test + - do: + search: + index: test + # Original 8 docs ("setup") + 2 more (above) + - match: { hits.total.value: 10 } + - match: { hits.total.relation: "eq" } + --- index permission can overwrite: - skip: @@ -407,7 +431,6 @@ index permission can overwrite: - do: headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user bulk: - refresh: true index: test body: - '{"index": {}}' @@ -417,7 +440,6 @@ index permission can overwrite: - do: headers: { Authorization: "Basic bGltaXRlZDp4LXBhY2stdGVzdC1wYXNzd29yZA==" } # limited - user index: - refresh: true index: test op_type: index body: @@ -432,3 +454,13 @@ index permission can overwrite: tx: 111434595272 rx: 430605511 - match: { _version: 2 } + + - do: # superuser + indices.refresh: + index: test + - do: + search: + index: test + # Original 8 docs ("setup") even though 2 have been overwritten (above) + - match: { hits.total.value: 8 } + - match: { hits.total.relation: "eq" } diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/terms_enum/10_basic.yml b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/terms_enum/10_basic.yml index 4b0509267227e..7709df92ad958 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/terms_enum/10_basic.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/terms_enum/10_basic.yml @@ -123,7 +123,6 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 mappings: properties: foo: @@ -142,7 +141,6 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 mappings: properties: foo: @@ -159,7 +157,6 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 mappings: properties: foo: @@ -175,7 +172,6 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 mappings: properties: foo: @@ -193,7 +189,6 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 mappings: properties: ck: @@ -243,7 +238,13 @@ setup: - do: #superuser cluster.health: index: test_f - wait_for_status: green + wait_for_status: yellow + wait_for_no_initializing_shards: true + + # terms_enum returns 0 results if the search fails, so perform a search here so that any failures are picked up early + - do: + count: + index: test_* --- teardown: diff --git a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/terms_enum/20_fieldtypes.yml 
b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/terms_enum/20_fieldtypes.yml index 51fd95ba6a80d..751c9d3880d2d 100644 --- a/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/terms_enum/20_fieldtypes.yml +++ b/x-pack/plugin/src/yamlRestTest/resources/rest-api-spec/test/terms_enum/20_fieldtypes.yml @@ -35,7 +35,6 @@ setup: settings: index: number_of_shards: 1 - number_of_replicas: 0 mappings: properties: foo_kw: @@ -66,8 +65,13 @@ setup: - do: #superuser cluster.health: index: test - wait_for_status: green + wait_for_status: yellow + wait_for_no_initializing_shards: true + # terms_enum returns 0 results if the search fails, so perform a search here so that any failures are picked up early + - do: + count: + index: test --- "Test terms enumeration keyword field": diff --git a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointTests.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointTests.java index f3935ed75ecd6..169ae7b04787f 100644 --- a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointTests.java +++ b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/checkpoint/TransformGetCheckpointTests.java @@ -7,7 +7,7 @@ package org.elasticsearch.xpack.transform.checkpoint; -import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.IndicesRequest; import org.elasticsearch.action.LatchedActionListener; @@ -125,7 +125,7 @@ protected void onSendRequest(long requestId, String action, TransportRequest req testIndices = testIndicesList.toArray(new String[0]); clusterStateWithIndex = ClusterState.builder(ClusterStateCreationUtils.state(numberOfNodes, testIndices, numberOfShards)) - .putTransportVersion("node01", TransportVersion.V_8_5_0) + .putTransportVersion("node01", TransportVersions.V_8_5_0) .build(); transformTask = new Task( diff --git a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/persistence/TransformConfigManagerTests.java b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/persistence/TransformConfigManagerTests.java index 42247f9481b14..d6141acfd5726 100644 --- a/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/persistence/TransformConfigManagerTests.java +++ b/x-pack/plugin/transform/src/internalClusterTest/java/org/elasticsearch/xpack/transform/persistence/TransformConfigManagerTests.java @@ -9,7 +9,6 @@ import org.elasticsearch.ResourceAlreadyExistsException; import org.elasticsearch.ResourceNotFoundException; -import org.elasticsearch.Version; import org.elasticsearch.action.admin.indices.create.CreateIndexRequest; import org.elasticsearch.action.admin.indices.refresh.RefreshRequest; import org.elasticsearch.action.get.GetRequest; @@ -28,6 +27,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.Tuple; import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.xcontent.ToXContent; @@ -753,7 +753,7 @@ private static ClusterState createClusterStateWithTransformIndex(String... 
index IndexMetadata.Builder builder = new IndexMetadata.Builder(index).settings( Settings.builder() .put(TransformInternalIndex.settings(Settings.EMPTY)) - .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT) + .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()) .build() ).numberOfReplicas(0).numberOfShards(1).putMapping(Strings.toString(TransformInternalIndex.mappings())); final var indexMetadata = builder.build(); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/DefaultTransformExtension.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/DefaultTransformExtension.java new file mode 100644 index 0000000000000..9cccbade339dc --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/DefaultTransformExtension.java @@ -0,0 +1,36 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.transform; + +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; + +public class DefaultTransformExtension implements TransformExtension { + + @Override + public boolean includeNodeInfo() { + return true; + } + + @Override + public Settings getTransformInternalIndexAdditionalSettings() { + return Settings.EMPTY; + } + + /** + * Provides destination index settings, hardcoded at the moment. In future this might be customizable or generation could be based on + * source settings. + */ + @Override + public Settings getTransformDestinationIndexSettings() { + return Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-1") + .build(); + } +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java index 86cdce7a92b50..6eebc97541123 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/Transform.java @@ -137,6 +137,7 @@ public class Transform extends Plugin implements SystemIndexPlugin, PersistentTa private final Settings settings; private final SetOnce transformServices = new SetOnce<>(); + private final TransformExtension transformExtension = new DefaultTransformExtension(); public static final Integer DEFAULT_INITIAL_MAX_PAGE_SEARCH_SIZE = Integer.valueOf(500); public static final TimeValue DEFAULT_TRANSFORM_FREQUENCY = TimeValue.timeValueSeconds(60); @@ -250,7 +251,12 @@ public Collection createComponents( client, xContentRegistry ); - TransformAuditor auditor = new TransformAuditor(client, clusterService.getNodeName(), clusterService, includeNodeInfo()); + TransformAuditor auditor = new TransformAuditor( + client, + clusterService.getNodeName(), + clusterService, + getTransformExtension().includeNodeInfo() + ); Clock clock = Clock.systemUTC(); TransformCheckpointService checkpointService = new TransformCheckpointService( clock, @@ -264,7 +270,11 @@ public Collection createComponents( transformServices.set(new TransformServices(configManager, checkpointService, auditor, scheduler)); - return Arrays.asList(transformServices.get(), new 
TransformClusterStateListener(clusterService, client)); + return Arrays.asList( + transformServices.get(), + new TransformClusterStateListener(clusterService, client), + new TransformExtensionHolder(getTransformExtension()) + ); } @Override @@ -285,7 +295,7 @@ public List> getPersistentTasksExecutor( threadPool, clusterService, settingsModule.getSettings(), - getTransformInternalIndexAdditionalSettings(), + getTransformExtension().getTransformInternalIndexAdditionalSettings(), expressionResolver ) ); @@ -354,7 +364,9 @@ public UnaryOperator> getIndexTemplateMetadat @Override public Collection getSystemIndexDescriptors(Settings settings) { try { - return List.of(TransformInternalIndex.getSystemIndexDescriptor(getTransformInternalIndexAdditionalSettings())); + return List.of( + TransformInternalIndex.getSystemIndexDescriptor(getTransformExtension().getTransformInternalIndexAdditionalSettings()) + ); } catch (IOException e) { throw new UncheckedIOException(e); } @@ -467,11 +479,17 @@ public String getFeatureDescription() { return "Manages configuration and state for transforms"; } + public TransformExtension getTransformExtension() { + return transformExtension; + } + + @Deprecated public boolean includeNodeInfo() { - return true; + return getTransformExtension().includeNodeInfo(); } + @Deprecated public Settings getTransformInternalIndexAdditionalSettings() { - return Settings.EMPTY; + return getTransformExtension().getTransformInternalIndexAdditionalSettings(); } } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformExtension.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformExtension.java new file mode 100644 index 0000000000000..c919f4dd4c550 --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformExtension.java @@ -0,0 +1,23 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.transform; + +import org.elasticsearch.common.settings.Settings; + +public interface TransformExtension { + + boolean includeNodeInfo(); + + Settings getTransformInternalIndexAdditionalSettings(); + + /** + * Provides destination index settings, hardcoded at the moment. In future this might be customizable or generation could be based on + * source settings. + */ + Settings getTransformDestinationIndexSettings(); +} diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformExtensionHolder.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformExtensionHolder.java new file mode 100644 index 0000000000000..93f285f14cf69 --- /dev/null +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformExtensionHolder.java @@ -0,0 +1,41 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0; you may not use this file except in compliance with the Elastic License + * 2.0. + */ + +package org.elasticsearch.xpack.transform; + +import org.elasticsearch.node.Node; + +import java.util.Objects; + +/** + * Wrapper for the {@link TransformExtension} interface that allows it to be used + * given the way {@link Node} does Guice bindings for plugin components. 
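The new TransformExtension interface above replaces the overridable methods on the Transform plugin (which the Transform.java hunk above marks @Deprecated) with a dedicated extension point. A minimal sketch of what an alternative implementation could look like, assuming only the interface shown above; ExampleTransformExtension and its concrete setting choices are illustrative, not part of this PR:

import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;

// Hypothetical extension: a deployment flavour that hides node details from audit
// messages and pins explicit destination-index settings.
public class ExampleTransformExtension implements TransformExtension {

    @Override
    public boolean includeNodeInfo() {
        return false;
    }

    @Override
    public Settings getTransformInternalIndexAdditionalSettings() {
        return Settings.EMPTY;
    }

    @Override
    public Settings getTransformDestinationIndexSettings() {
        // Diverges from the default of one shard with 0-1 auto-expanded replicas.
        return Settings.builder()
            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2)
            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
            .build();
    }
}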
diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransformUpdater.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransformUpdater.java
index 791dc75d5f0b8..98777b47543cb 100644
--- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransformUpdater.java
+++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransformUpdater.java
@@ -125,6 +125,7 @@ public static void updateTransform(
         final boolean dryRun,
         final boolean checkAccess,
         final TimeValue timeout,
+        final Settings destIndexSettings,
         ActionListener<UpdateResult> listener
     ) {
         // rewrite config into a new format if necessary
@@ -185,6 +186,7 @@ public static void updateTransform(
                 destIndexMappings,
                 seqNoPrimaryTermAndIndex,
                 clusterState,
+                destIndexSettings,
                 ActionListener.wrap(r -> updateTransformListener.onResponse(null), listener::onFailure)
             );
         }, listener::onFailure);
@@ -300,6 +302,7 @@ private static void updateTransformConfiguration(
         Map<String, String> mappings,
         SeqNoPrimaryTermAndIndex seqNoPrimaryTermAndIndex,
         ClusterState clusterState,
+        Settings destIndexSettings,
         ActionListener<Void> listener
     ) {
         // <3> Return to the listener
@@ -351,6 +354,7 @@ private static void updateTransformConfiguration(
             indexNameExpressionResolver,
             clusterState,
             config,
+            destIndexSettings,
             mappings,
             createDestinationListener
         );
diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointAction.java
index 9a3aa82c499d9..23bc6406736fe 100644
--- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointAction.java
+++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetCheckpointAction.java
@@ -8,7 +8,7 @@
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
-import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionListenerResponseHandler;
 import org.elasticsearch.action.NoShardAvailableActionException;
@@ -106,7 +106,7 @@ private Map<String, Set<ShardId>> resolveIndicesToPrimaryShards(ClusterState sta
             }
             if (shard.assignedToNode() && nodes.get(shard.currentNodeId()) != null) {
                 // special case: The minimum TransportVersion in the cluster is on an old version
-                if (state.getMinTransportVersion().before(TransportVersion.V_8_2_0)) {
+                if (state.getMinTransportVersion().before(TransportVersions.V_8_2_0)) {
                     throw new ActionNotFoundTransportException(GetCheckpointNodeAction.NAME);
                 }
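Every caller of TransformUpdater.updateTransform now threads the extension-provided destination-index settings through as the argument just before the listener. A sketch of the new call shape only; the leading arguments are unchanged by this diff and are deliberately elided here:

    // Trailing arguments only; names of the elided leading arguments are not visible in this diff.
    TransformUpdater.updateTransform(
        /* ... unchanged leading arguments ... */
        false,                // dryRun
        false,                // checkAccess
        request.timeout(),    // timeout
        destIndexSettings,    // new: from TransformExtension#getTransformDestinationIndexSettings()
        listener              // ActionListener<TransformUpdater.UpdateResult>
    );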
diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java
index cdaba16182348..a7d7851704054 100644
--- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java
+++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPreviewTransformAction.java
@@ -50,6 +50,7 @@
 import org.elasticsearch.xpack.core.transform.transforms.SyncConfig;
 import org.elasticsearch.xpack.core.transform.transforms.TransformConfig;
 import org.elasticsearch.xpack.core.transform.transforms.TransformDestIndexSettings;
+import org.elasticsearch.xpack.transform.TransformExtensionHolder;
 import org.elasticsearch.xpack.transform.persistence.TransformIndex;
 import org.elasticsearch.xpack.transform.transforms.Function;
 import org.elasticsearch.xpack.transform.transforms.FunctionFactory;
@@ -78,6 +79,7 @@ public class TransportPreviewTransformAction extends HandledTransportAction
         ActionListener<List<Map<String, Object>>> previewListener = ActionListener.wrap(docs -> {
             if (pipeline == null) {
                 TransformDestIndexSettings generatedDestIndexSettings = TransformIndex.createTransformDestIndexSettings(
+                    destIndexSettings,
                     mappings.get(),
                     transformId,
                     Clock.systemUTC()
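With the extra Settings parameter, preview now derives the generated destination-index settings from whatever the installed extension supplies instead of a hard-coded default. A standalone sketch of the new signature, using a placeholder transform id and an empty field-to-type mapping (the wrapper class is hypothetical):

    import java.time.Clock;
    import java.util.HashMap;
    import java.util.Map;

    import org.elasticsearch.xpack.core.transform.transforms.TransformDestIndexSettings;
    import org.elasticsearch.xpack.transform.DefaultTransformExtension;
    import org.elasticsearch.xpack.transform.persistence.TransformIndex;

    class DestIndexSettingsExample {
        // "my-transform" and the empty field-to-type map are placeholder values.
        static TransformDestIndexSettings generate() {
            Map<String, String> fieldTypes = new HashMap<>(); // "fieldname: fieldtype" pairs
            return TransformIndex.createTransformDestIndexSettings(
                new DefaultTransformExtension().getTransformDestinationIndexSettings(),
                fieldTypes,
                "my-transform",
                Clock.systemUTC()
            );
        }
    }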
diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportResetTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportResetTransformAction.java
index d60fb4af651f9..bff969cf0b856 100644
--- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportResetTransformAction.java
+++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportResetTransformAction.java
@@ -37,6 +37,7 @@
 import org.elasticsearch.xpack.core.transform.action.StopTransformAction;
 import org.elasticsearch.xpack.core.transform.transforms.TransformConfig;
 import org.elasticsearch.xpack.core.transform.transforms.TransformConfigUpdate;
+import org.elasticsearch.xpack.transform.TransformExtensionHolder;
 import org.elasticsearch.xpack.transform.TransformServices;
 import org.elasticsearch.xpack.transform.notifications.TransformAuditor;
 import org.elasticsearch.xpack.transform.persistence.SeqNoPrimaryTermAndIndex;
@@ -58,6 +59,7 @@ public class TransportResetTransformAction extends AcknowledgedTransportMasterNo
     private final Client client;
     private final SecurityContext securityContext;
     private final Settings settings;
+    private final Settings destIndexSettings;
 
     @Inject
     public TransportResetTransformAction(
@@ -68,7 +70,8 @@ public TransportResetTransformAction(
         IndexNameExpressionResolver indexNameExpressionResolver,
         TransformServices transformServices,
         Client client,
-        Settings settings
+        Settings settings,
+        TransformExtensionHolder transformExtensionHolder
     ) {
         super(
             ResetTransformAction.NAME,
@@ -87,6 +90,7 @@ public TransportResetTransformAction(
             ? new SecurityContext(settings, threadPool.getThreadContext())
             : null;
         this.settings = settings;
+        this.destIndexSettings = transformExtensionHolder.getTransformExtension().getTransformDestinationIndexSettings();
     }
 
     @Override
@@ -131,6 +135,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A
                     false, // dry run
                     false, // check access
                     request.timeout(),
+                    destIndexSettings,
                     updateTransformListener
                 );
             },
diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java
index fa6272a30ae89..8776f112e6178 100644
--- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java
+++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java
@@ -23,6 +23,7 @@
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.health.HealthStatus;
 import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
@@ -40,6 +41,7 @@
 import org.elasticsearch.xpack.core.transform.transforms.TransformState;
 import org.elasticsearch.xpack.core.transform.transforms.TransformTaskParams;
 import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState;
+import org.elasticsearch.xpack.transform.TransformExtensionHolder;
 import org.elasticsearch.xpack.transform.TransformServices;
 import org.elasticsearch.xpack.transform.notifications.TransformAuditor;
 import org.elasticsearch.xpack.transform.persistence.AuthorizationStatePersistenceUtils;
@@ -62,6 +64,7 @@ public class TransportStartTransformAction extends TransportMasterNodeAction
                     false, // dryRun
                     true, // checkAccess
                     request.getTimeout(),
+                    destIndexSettings,
                     ActionListener.wrap(updateResult -> {
                         TransformConfig originalConfig = configAndVersion.v1();
                         TransformConfig updatedConfig = updateResult.getConfig();
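The reset and start actions (and, below, the upgrade action) all acquire these settings the same way: Guice injects the TransformExtensionHolder registered in createComponents, and the constructor caches the destination-index settings in a field. The recurring pattern, reduced to its essentials (the action class name is hypothetical; only the holder wiring is taken from this diff):

    import org.elasticsearch.common.inject.Inject;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.xpack.transform.TransformExtensionHolder;

    // Hypothetical action class illustrating the recurring constructor wiring.
    public class TransportExampleTransformAction {

        private final Settings destIndexSettings;

        @Inject
        public TransportExampleTransformAction(TransformExtensionHolder transformExtensionHolder) {
            this.destIndexSettings = transformExtensionHolder.getTransformExtension().getTransformDestinationIndexSettings();
        }
    }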
diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpgradeTransformsAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpgradeTransformsAction.java
index 2e28106b77606..6b01f6d7966a0 100644
--- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpgradeTransformsAction.java
+++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpgradeTransformsAction.java
@@ -35,6 +35,7 @@
 import org.elasticsearch.xpack.core.transform.action.UpgradeTransformsAction.Response;
 import org.elasticsearch.xpack.core.transform.transforms.TransformConfig;
 import org.elasticsearch.xpack.core.transform.transforms.TransformConfigUpdate;
+import org.elasticsearch.xpack.transform.TransformExtensionHolder;
 import org.elasticsearch.xpack.transform.TransformServices;
 import org.elasticsearch.xpack.transform.action.TransformUpdater.UpdateResult;
 import org.elasticsearch.xpack.transform.notifications.TransformAuditor;
@@ -56,6 +57,7 @@ public class TransportUpgradeTransformsAction extends TransportMasterNodeAction<
     private final Settings settings;
     private final Client client;
     private final TransformAuditor auditor;
+    private final Settings destIndexSettings;
 
     @Inject
     public TransportUpgradeTransformsAction(
@@ -66,7 +68,8 @@ public TransportUpgradeTransformsAction(
         IndexNameExpressionResolver indexNameExpressionResolver,
         TransformServices transformServices,
         Client client,
-        Settings settings
+        Settings settings,
+        TransformExtensionHolder transformExtensionHolder
     ) {
         super(
             UpgradeTransformsAction.NAME,
@@ -88,6 +91,7 @@ public TransportUpgradeTransformsAction(
         this.securityContext = XPackSettings.SECURITY_ENABLED.get(settings)
             ? new SecurityContext(settings, threadPool.getThreadContext())
             : null;
+        this.destIndexSettings = transformExtensionHolder.getTransformExtension().getTransformDestinationIndexSettings();
     }
 
     @Override
@@ -163,6 +167,7 @@ private void updateOneTransform(String id, boolean dryRun, TimeValue timeout, Ac
             dryRun,
             false, // check access,
             timeout,
+            destIndexSettings,
             listener
         );
     }, failure -> {
diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java
index e8ef290bd9db1..39183a1ca8502 100644
--- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java
+++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/persistence/TransformIndex.java
@@ -23,7 +23,6 @@
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.internal.Client;
 import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MappingMetadata;
 import org.elasticsearch.common.settings.Settings;
@@ -106,6 +105,7 @@ public static void createDestinationIndex(
         IndexNameExpressionResolver indexNameExpressionResolver,
         ClusterState clusterState,
         TransformConfig config,
+        Settings destIndexSettings,
         Map<String, String> destIndexMappings,
         ActionListener<Boolean> listener
     ) {
@@ -137,6 +137,7 @@ public static void createDestinationIndex(
         if (dest.length == 0) {
             TransformDestIndexSettings generatedDestIndexSettings = createTransformDestIndexSettings(
+                destIndexSettings,
                 destIndexMappings,
                 config.getId(),
                 Clock.systemUTC()
@@ -248,13 +249,16 @@ static void setUpDestinationAliases(Client client, TransformConfig config, Actio
         );
     }
 
-    public static TransformDestIndexSettings createTransformDestIndexSettings(Map<String, String> mappings, String id, Clock clock) {
+    public static TransformDestIndexSettings createTransformDestIndexSettings(
+        Settings settings,
+        Map<String, String> mappings,
+        String id,
+        Clock clock
+    ) {
         Map<String, Object> indexMappings = new HashMap<>();
         indexMappings.put(PROPERTIES, createMappingsFromStringMap(mappings));
         indexMappings.put(META, createMetadata(id, clock));
 
-        Settings settings = createSettings();
-
         // transform does not create aliases, however the user might customize this in future
         Set<Alias> aliases = null;
 
         return new TransformDestIndexSettings(indexMappings, settings, aliases);
@@ -288,17 +292,6 @@ private static Map<String, Object> createMetadata(String id, Clock clock) {
         return metadata;
     }
 
-    /**
-     * creates generated index settings, hardcoded at the moment, in future this might be customizable or generation could
-     * be based on source settings.
-     */
-    private static Settings createSettings() {
-        return Settings.builder() // <1>
-            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
-            .put(IndexMetadata.SETTING_AUTO_EXPAND_REPLICAS, "0-1")
-            .build();
-    }
-
     /**
      * This takes the a {@code Map<String, String>} of the type "fieldname: fieldtype" and transforms it into the
      * typical mapping format.
diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduler.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduler.java
index 0a420c649300d..98939f90e31a2 100644
--- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduler.java
+++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/scheduling/TransformScheduler.java
@@ -92,7 +92,7 @@ public TransformScheduler(Clock clock, ThreadPool threadPool, Settings settings)
      */
    public void start() {
         if (scheduledFuture == null) {
-            scheduledFuture = threadPool.scheduleWithFixedDelay(this::processScheduledTasks, schedulerFrequency, ThreadPool.Names.GENERIC);
+            scheduledFuture = threadPool.scheduleWithFixedDelay(this::processScheduledTasks, schedulerFrequency, threadPool.generic());
         }
     }
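The scheduler change is an API migration: scheduleWithFixedDelay now takes the executor itself rather than a thread-pool name string. A minimal sketch of the new call shape, assuming an existing ThreadPool and task (the wrapper class and sixty-second delay are illustrative):

    import org.elasticsearch.core.TimeValue;
    import org.elasticsearch.threadpool.ThreadPool;

    class SchedulerExample {
        // threadPool.generic() supplies the executor formerly named by the ThreadPool.Names.GENERIC string.
        static void scheduleProcessing(ThreadPool threadPool, Runnable processScheduledTasks) {
            threadPool.scheduleWithFixedDelay(processScheduledTasks, TimeValue.timeValueSeconds(60), threadPool.generic());
        }
    }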
diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformUpdaterTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformUpdaterTests.java
index 6c59f2b34f0c0..27ca18d0e1d2f 100644
--- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformUpdaterTests.java
+++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransformUpdaterTests.java
@@ -43,6 +43,7 @@
 import org.elasticsearch.xpack.core.transform.transforms.TransformStoredDoc;
 import org.elasticsearch.xpack.core.transform.transforms.TransformTaskState;
 import org.elasticsearch.xpack.core.transform.utils.TransformConfigVersionUtils;
+import org.elasticsearch.xpack.transform.DefaultTransformExtension;
 import org.elasticsearch.xpack.transform.action.TransformUpdater.UpdateResult;
 import org.elasticsearch.xpack.transform.notifications.MockTransformAuditor;
 import org.elasticsearch.xpack.transform.notifications.TransformAuditor;
@@ -76,6 +77,7 @@ public class TransformUpdaterTests extends ESTestCase {
     private ClusterService clusterService = mock(ClusterService.class);
     private TransformAuditor auditor = new MockTransformAuditor(clusterService);
     private final Settings settings = Settings.builder().put(XPackSettings.SECURITY_ENABLED.getKey(), true).build();
+    private final Settings destIndexSettings = new DefaultTransformExtension().getTransformDestinationIndexSettings();
 
     private static class MyMockClient extends NoOpClient {
@@ -157,6 +159,7 @@ public void testTransformUpdateNoAction() throws InterruptedException {
                 false,
                 false,
                 AcknowledgedRequest.DEFAULT_ACK_TIMEOUT,
+                destIndexSettings,
                 listener
             ),
             updateResult -> {
@@ -192,6 +195,7 @@ public void testTransformUpdateNoAction() throws InterruptedException {
                 false,
                 false,
                 AcknowledgedRequest.DEFAULT_ACK_TIMEOUT,
+                destIndexSettings,
                 listener
             ),
             updateResult -> {
@@ -264,6 +268,7 @@ public void testTransformUpdateRewrite() throws InterruptedException {
                 false,
                 false,
                 AcknowledgedRequest.DEFAULT_ACK_TIMEOUT,
+                destIndexSettings,
                 listener
             ),
             updateResult -> {
@@ -331,6 +336,7 @@ public void testTransformUpdateDryRun() throws InterruptedException {
                 true,
                 false,
                 AcknowledgedRequest.DEFAULT_ACK_TIMEOUT,
+                destIndexSettings,
                 listener
             ),
             updateResult -> {
@@ -378,6 +384,7 @@ public void testTransformUpdateCheckAccessSuccess() throws InterruptedException
                 false,
                 true,
                 AcknowledgedRequest.DEFAULT_ACK_TIMEOUT,
+                destIndexSettings,
                 listener
             ),
             updateResult -> {
@@ -420,6 +427,7 @@ public void testTransformUpdateCheckAccessFailureDeferValidation() throws Interr
                 false,
                 true,
                 AcknowledgedRequest.DEFAULT_ACK_TIMEOUT,
+                destIndexSettings,
                 listener
             ),
             updateResult -> {
@@ -454,6 +462,7 @@ public void testTransformUpdateCheckAccessFailureNoDeferValidation() {
                 false,
                 true,
                 AcknowledgedRequest.DEFAULT_ACK_TIMEOUT,
+                destIndexSettings,
                 ActionListener.wrap(
                     r -> fail("Should fail due to missing privileges"),
                     e -> assertThat(e.getMessage(), is(equalTo("missing privileges")))
diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformIndexTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformIndexTests.java
index 9f704faf970c8..5afb6db1856fe 100644
--- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformIndexTests.java
+++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformIndexTests.java
@@ -146,7 +146,7 @@ public void testCreateDestinationIndex() throws IOException {
         TransformIndex.createDestinationIndex(
             client,
             TransformConfigTests.randomTransformConfig(TRANSFORM_ID),
-            TransformIndex.createTransformDestIndexSettings(new HashMap<>(), TRANSFORM_ID, clock),
+            TransformIndex.createTransformDestIndexSettings(Settings.EMPTY, new HashMap<>(), TRANSFORM_ID, clock),
             ActionTestUtils.assertNoFailureListener(Assert::assertTrue)
         );
diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformInternalIndexTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformInternalIndexTests.java
index a6a224c1b053f..cdfdaa546ace6 100644
--- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformInternalIndexTests.java
+++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/persistence/TransformInternalIndexTests.java
@@ -8,7 +8,6 @@
 package org.elasticsearch.xpack.transform.persistence;
 
 import org.elasticsearch.ResourceAlreadyExistsException;
-import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
@@ -32,6 +31,7 @@
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.index.Index;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -68,7 +68,7 @@ public static ClusterState randomTransformClusterState(boolean shardsReady) {
         IndexMetadata.Builder builder = new IndexMetadata.Builder(TransformInternalIndexConstants.LATEST_INDEX_VERSIONED_NAME).settings(
             Settings.builder()
                 .put(TransformInternalIndex.settings(Settings.EMPTY))
-                .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT)
+                .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
                 .build()
         ).numberOfReplicas(0).numberOfShards(1).putMapping(Strings.toString(TransformInternalIndex.mappings()));
 
         indexMapBuilder.put(TransformInternalIndexConstants.LATEST_INDEX_VERSIONED_NAME, builder.build());
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/Attachment.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/Attachment.java
index 1242627c3e946..f217a475eb0a4 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/Attachment.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/notification/slack/message/Attachment.java
@@ -162,11 +162,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
             builder.field(XField.THUMB_URL.getPreferredName(), thumbUrl);
         }
         if (markdownSupportedFields != null) {
-            builder.startArray(XField.MARKDOWN_IN.getPreferredName());
-            for (String field : markdownSupportedFields) {
-                builder.value(field);
-            }
-            builder.endArray();
+            builder.array(XField.MARKDOWN_IN.getPreferredName(), markdownSupportedFields);
         }
         if (actions != null && actions.isEmpty() == false) {
             builder.startArray("actions");
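The Watcher change above folds a five-line startArray/value/endArray loop into the single equivalent XContentBuilder.array(...) call. A self-contained sketch of the same call shape (the wrapper class, field name, and values are examples, not taken from the Attachment code):

    import java.io.IOException;

    import org.elasticsearch.xcontent.XContentBuilder;
    import org.elasticsearch.xcontent.XContentFactory;

    class MarkdownArrayExample {
        static void writeMarkdownFields() throws IOException {
            XContentBuilder builder = XContentFactory.jsonBuilder().startObject();
            // Produces the same JSON as looping over the values with builder.value(...).
            builder.array("mrkdwn_in", "text", "pretext");
            builder.endObject();
        }
    }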
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStoreUtilsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStoreUtilsTests.java
index c4114acbd2cc5..ffceb054ae54d 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStoreUtilsTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStoreUtilsTests.java
@@ -7,7 +7,6 @@
 
 package org.elasticsearch.xpack.watcher.watch;
 
-import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.AliasMetadata;
 import org.elasticsearch.cluster.metadata.DataStream;
 import org.elasticsearch.cluster.metadata.DataStreamAlias;
@@ -18,6 +17,7 @@
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.Index;
+import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.test.ESTestCase;
 
 import java.util.ArrayList;
@@ -137,7 +137,7 @@ private IndexMetadata createIndexMetaData(String indexName, AliasMetadata aliasM
             .put(IndexMetadata.SETTING_PRIORITY, 5)
             .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
             .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
-            .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT)
+            .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current())
             .build();
         indexMetadataBuilder.settings(settings);
         if (aliasMetadata != null) {
diff --git a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TransportVersionClusterStateUpgradeIT.java b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TransportVersionClusterStateUpgradeIT.java
index 727b5ac1b478f..7c7a392a6cf83 100644
--- a/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TransportVersionClusterStateUpgradeIT.java
+++ b/x-pack/qa/rolling-upgrade/src/test/java/org/elasticsearch/upgrades/TransportVersionClusterStateUpgradeIT.java
@@ -8,6 +8,7 @@
 package org.elasticsearch.upgrades;
 
 import org.elasticsearch.TransportVersion;
+import org.elasticsearch.TransportVersions;
 import org.elasticsearch.Version;
 import org.elasticsearch.client.Request;
 import org.elasticsearch.common.util.Maps;
@@ -24,7 +25,10 @@ public class TransportVersionClusterStateUpgradeIT extends AbstractUpgradeTestCa
 
     public void testReadsInferredTransportVersions() throws IOException {
         assumeTrue("TransportVersion introduced in 8.8.0", UPGRADE_FROM_VERSION.before(Version.V_8_8_0));
-        assumeTrue("This only has visible effects when upgrading beyond 8.8.0", TransportVersion.current().after(TransportVersion.V_8_8_0));
+        assumeTrue(
+            "This only has visible effects when upgrading beyond 8.8.0",
+            TransportVersion.current().after(TransportVersions.V_8_8_0)
+        );
         assumeTrue("Only runs on the mixed cluster", CLUSTER_TYPE == ClusterType.MIXED);
 
         // if the master is not upgraded, and the secondary node is, then the cluster info from the secondary
         // should have inferred transport versions in it
@@ -51,7 +55,7 @@ public void testReadsInferredTransportVersions() throws IOException {
                 assertThat(
                     "Node " + ver.getKey() + " should have an inferred transport version",
                     tvs.get(ver.getKey()),
-                    equalTo(TransportVersion.V_8_8_0)
+                    equalTo(TransportVersions.V_8_8_0)
                 );
             }
         }
@@ -59,7 +63,10 @@ public void testCompletesRealTransportVersions() throws IOException {
         assumeTrue("TransportVersion introduced in 8.8.0", UPGRADE_FROM_VERSION.before(Version.V_8_8_0));
-        assumeTrue("This only has visible effects when upgrading beyond 8.8.0", TransportVersion.current().after(TransportVersion.V_8_8_0));
+        assumeTrue(
+            "This only has visible effects when upgrading beyond 8.8.0",
+            TransportVersion.current().after(TransportVersions.V_8_8_0)
+        );
         assumeTrue("Only runs on the upgraded cluster", CLUSTER_TYPE == ClusterType.UPGRADED);
 
         // once everything is upgraded, the master should fill in the real transport versions
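Taken together, the test changes in this part of the diff follow one migration rule: the index-created setting is populated from IndexVersion.current() instead of the deprecated Version.CURRENT, and the pinned V_* constants move from TransportVersion to the new TransportVersions holder class. A minimal sketch of building test IndexMetadata the new way (the wrapper class, index name, and shard counts are example values):

    import org.elasticsearch.cluster.metadata.IndexMetadata;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.index.IndexVersion;

    class IndexMetadataExample {
        static IndexMetadata build() {
            return new IndexMetadata.Builder("my-index").settings(
                Settings.builder().put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), IndexVersion.current()).build()
            ).numberOfShards(1).numberOfReplicas(0).build();
        }
    }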